From 6ca7a0dea1692fb5ed1e71be5de8a08a199cc484 Mon Sep 17 00:00:00 2001
From: Khurram Baig
Date: Fri, 4 Oct 2024 14:18:18 +0530
Subject: [PATCH] Vendor the VFS library and dependencies

This library will be leveraged to rewrite caching for Go and S3. A usage
sketch of the vendored API follows the file listing below.

---
 go.mod | 25 +- go.sum | 128 +- .../Azure/azure-pipeline-go/LICENSE | 21 +
 .../Azure/azure-pipeline-go/pipeline/core.go | 284 + .../azure-pipeline-go/pipeline/defaultlog.go | 14 +
 .../pipeline/defaultlog_syslog.go | 33 + .../pipeline/defaultlog_windows.go | 61 +
 .../Azure/azure-pipeline-go/pipeline/doc.go | 161 + .../Azure/azure-pipeline-go/pipeline/error.go | 184 +
 .../azure-pipeline-go/pipeline/progress.go | 82 + .../azure-pipeline-go/pipeline/request.go | 147 +
 .../azure-pipeline-go/pipeline/response.go | 74 + .../azure-pipeline-go/pipeline/version.go | 9 +
 .../Azure/azure-storage-blob-go/LICENSE | 21 + .../azblob/access_conditions.go | 65 +
 .../azblob/bytes_writer.go | 24 + .../azblob/chunkwriting.go | 215 + .../azblob/common_utils.go | 1 +
 .../azure-storage-blob-go/azblob/highlevel.go | 569 + .../azblob/parsing_urls.go | 173 +
 .../azblob/request_common.go | 56 + .../azblob/sas_service.go | 371 + .../azblob/section_writer.go | 47 +
 .../azblob/service_codes_blob.go | 198 + .../azblob/storage_account_credential.go | 8 +
 .../azblob/url_append_blob.go | 161 + .../azure-storage-blob-go/azblob/url_blob.go | 363 +
 .../azblob/url_block_blob.go | 182 + .../azblob/url_container.go | 319 + .../azblob/url_page_blob.go | 276 +
 .../azblob/url_service.go | 177 + .../azblob/user_delegation_credential.go | 38 +
 .../azure-storage-blob-go/azblob/version.go | 3 + .../azblob/zc_credential_anonymous.go | 55 +
 .../azblob/zc_credential_shared_key.go | 205 + .../azblob/zc_credential_token.go | 146 +
 .../azblob/zc_pipeline.go | 45 + .../azblob/zc_policy_request_log.go | 194 +
 .../azblob/zc_policy_retry.go | 419 + .../azblob/zc_policy_telemetry.go | 51 +
 .../azblob/zc_policy_unique_request_id.go | 36 + .../azblob/zc_retry_reader.go | 186 +
 .../azblob/zc_sas_account.go | 244 + .../azblob/zc_sas_query_params.go | 393 +
 .../azblob/zc_service_codes_common.go | 134 + .../azblob/zc_storage_error.go | 111 +
 .../azblob/zc_util_validate.go | 64 + .../azure-storage-blob-go/azblob/zc_uuid.go | 77 +
 .../azure-storage-blob-go/azblob/zt_doc.go | 89 + .../azblob/zz_generated_append_blob.go | 532 +
 .../azblob/zz_generated_blob.go | 2063 + .../azblob/zz_generated_block_blob.go | 848 +
 .../azblob/zz_generated_client.go | 38 + .../azblob/zz_generated_container.go | 1232 +
 .../azblob/zz_generated_models.go | 7302 +++ .../azblob/zz_generated_page_blob.go | 1065 +
 .../azblob/zz_generated_responder_policy.go | 74 + .../azblob/zz_generated_response_error.go | 95 +
 .../azblob/zz_generated_service.go | 618 + .../azblob/zz_generated_validation.go | 367 +
 .../azblob/zz_generated_version.go | 14 + .../azblob/zz_response_helpers.go | 240 +
 .../github.com/Azure/go-autorest/.gitignore | 32 + .../github.com/Azure/go-autorest/CHANGELOG.md | 1004 +
 .../github.com/Azure/go-autorest/GNUmakefile | 23 + .../github.com/Azure/go-autorest/Gopkg.lock | 324 +
 .../github.com/Azure/go-autorest/Gopkg.toml | 59 + vendor/github.com/Azure/go-autorest/LICENSE | 191 +
 vendor/github.com/Azure/go-autorest/README.md | 165 + .../Azure/go-autorest/autorest/LICENSE | 191 +
 .../Azure/go-autorest/autorest/adal/LICENSE | 191 + .../Azure/go-autorest/autorest/adal/README.md | 294 +
 .../Azure/go-autorest/autorest/adal/config.go | 151 + .../go-autorest/autorest/adal/devicetoken.go | 273 +
 .../autorest/adal/go_mod_tidy_hack.go
| 25 + .../go-autorest/autorest/adal/persist.go | 134 + .../Azure/go-autorest/autorest/adal/sender.go | 101 + .../Azure/go-autorest/autorest/adal/token.go | 1429 + .../go-autorest/autorest/adal/token_1.13.go | 76 + .../go-autorest/autorest/adal/token_legacy.go | 75 + .../go-autorest/autorest/adal/version.go | 45 + .../go-autorest/autorest/authorization.go | 353 + .../go-autorest/autorest/authorization_sas.go | 66 + .../autorest/authorization_storage.go | 307 + .../Azure/go-autorest/autorest/autorest.go | 150 + .../Azure/go-autorest/autorest/azure/async.go | 995 + .../Azure/go-autorest/autorest/azure/azure.go | 388 + .../autorest/azure/environments.go | 331 + .../autorest/azure/metadata_environment.go | 245 + .../Azure/go-autorest/autorest/azure/rp.go | 204 + .../Azure/go-autorest/autorest/client.go | 328 + .../Azure/go-autorest/autorest/date/LICENSE | 191 + .../Azure/go-autorest/autorest/date/date.go | 96 + .../autorest/date/go_mod_tidy_hack.go | 24 + .../Azure/go-autorest/autorest/date/time.go | 103 + .../go-autorest/autorest/date/timerfc1123.go | 100 + .../go-autorest/autorest/date/unixtime.go | 123 + .../go-autorest/autorest/date/utility.go | 25 + .../Azure/go-autorest/autorest/error.go | 103 + .../go-autorest/autorest/go_mod_tidy_hack.go | 25 + .../Azure/go-autorest/autorest/preparer.go | 549 + .../Azure/go-autorest/autorest/responder.go | 269 + .../go-autorest/autorest/retriablerequest.go | 52 + .../autorest/retriablerequest_1.7.go | 55 + .../autorest/retriablerequest_1.8.go | 67 + .../Azure/go-autorest/autorest/sender.go | 458 + .../Azure/go-autorest/autorest/utility.go | 232 + .../go-autorest/autorest/utility_1.13.go | 30 + .../go-autorest/autorest/utility_legacy.go | 32 + .../Azure/go-autorest/autorest/version.go | 41 + .../Azure/go-autorest/azure-pipelines.yml | 105 + vendor/github.com/Azure/go-autorest/doc.go | 18 + .../Azure/go-autorest/logger/LICENSE | 191 + .../go-autorest/logger/go_mod_tidy_hack.go | 24 + .../Azure/go-autorest/logger/logger.go | 337 + .../Azure/go-autorest/tracing/LICENSE | 191 + .../go-autorest/tracing/go_mod_tidy_hack.go | 24 + .../Azure/go-autorest/tracing/tracing.go | 67 + vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202 + vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3 + .../github.com/aws/aws-sdk-go/aws/arn/arn.go | 93 + .../aws/aws-sdk-go/aws/auth/bearer/token.go | 50 + .../aws/aws-sdk-go/aws/awserr/error.go | 164 + .../aws/aws-sdk-go/aws/awserr/types.go | 221 + .../aws/aws-sdk-go/aws/awsutil/copy.go | 108 + .../aws/aws-sdk-go/aws/awsutil/equal.go | 27 + .../aws/aws-sdk-go/aws/awsutil/path_value.go | 221 + .../aws/aws-sdk-go/aws/awsutil/prettify.go | 123 + .../aws-sdk-go/aws/awsutil/string_value.go | 90 + .../aws/aws-sdk-go/aws/client/client.go | 94 + .../aws-sdk-go/aws/client/default_retryer.go | 177 + .../aws/aws-sdk-go/aws/client/logger.go | 206 + .../aws/client/metadata/client_info.go | 15 + .../aws-sdk-go/aws/client/no_op_retryer.go | 28 + .../github.com/aws/aws-sdk-go/aws/config.go | 670 + .../aws/aws-sdk-go/aws/context_1_5.go | 38 + .../aws/aws-sdk-go/aws/context_1_9.go | 12 + .../aws-sdk-go/aws/context_background_1_5.go | 23 + .../aws-sdk-go/aws/context_background_1_7.go | 21 + .../aws/aws-sdk-go/aws/context_sleep.go | 24 + .../aws/aws-sdk-go/aws/convert_types.go | 918 + .../aws/corehandlers/awsinternal.go | 4 + .../aws-sdk-go/aws/corehandlers/handlers.go | 232 + .../aws/corehandlers/param_validator.go | 17 + .../aws-sdk-go/aws/corehandlers/user_agent.go | 47 + .../aws/credentials/chain_provider.go | 100 + 
.../credentials/context_background_go1.5.go | 23 + .../credentials/context_background_go1.7.go | 21 + .../aws/credentials/context_go1.5.go | 40 + .../aws/credentials/context_go1.9.go | 14 + .../aws-sdk-go/aws/credentials/credentials.go | 383 + .../ec2rolecreds/ec2_role_provider.go | 188 + .../aws/credentials/endpointcreds/provider.go | 255 + .../aws/credentials/env_provider.go | 74 + .../aws-sdk-go/aws/credentials/example.ini | 12 + .../aws/credentials/processcreds/provider.go | 438 + .../shared_credentials_provider.go | 151 + .../aws/credentials/ssocreds/doc.go | 60 + .../aws-sdk-go/aws/credentials/ssocreds/os.go | 10 + .../aws/credentials/ssocreds/os_windows.go | 7 + .../aws/credentials/ssocreds/provider.go | 187 + .../credentials/ssocreds/sso_cached_token.go | 237 + .../credentials/ssocreds/token_provider.go | 148 + .../aws/credentials/static_provider.go | 57 + .../stscreds/assume_role_provider.go | 371 + .../stscreds/web_identity_provider.go | 182 + .../github.com/aws/aws-sdk-go/aws/csm/doc.go | 69 + .../aws/aws-sdk-go/aws/csm/enable.go | 89 + .../aws/aws-sdk-go/aws/csm/metric.go | 109 + .../aws/aws-sdk-go/aws/csm/metric_chan.go | 55 + .../aws-sdk-go/aws/csm/metric_exception.go | 26 + .../aws/aws-sdk-go/aws/csm/reporter.go | 264 + .../aws/aws-sdk-go/aws/defaults/defaults.go | 252 + .../aws-sdk-go/aws/defaults/shared_config.go | 27 + vendor/github.com/aws/aws-sdk-go/aws/doc.go | 56 + .../aws/aws-sdk-go/aws/ec2metadata/api.go | 250 + .../aws/aws-sdk-go/aws/ec2metadata/service.go | 245 + .../aws/ec2metadata/token_provider.go | 99 + .../aws/aws-sdk-go/aws/endpoints/decode.go | 193 + .../aws/aws-sdk-go/aws/endpoints/defaults.go | 48609 ++++++++++++++++ .../aws/endpoints/dep_service_ids.go | 141 + .../aws/aws-sdk-go/aws/endpoints/doc.go | 65 + .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 708 + .../aws/endpoints/legacy_regions.go | 24 + .../aws/aws-sdk-go/aws/endpoints/v3model.go | 594 + .../aws/endpoints/v3model_codegen.go | 412 + .../github.com/aws/aws-sdk-go/aws/errors.go | 13 + .../aws/aws-sdk-go/aws/jsonvalue.go | 12 + .../github.com/aws/aws-sdk-go/aws/logger.go | 121 + .../aws/request/connection_reset_error.go | 19 + .../aws/aws-sdk-go/aws/request/handlers.go | 346 + .../aws-sdk-go/aws/request/http_request.go | 24 + .../aws-sdk-go/aws/request/offset_reader.go | 65 + .../aws/aws-sdk-go/aws/request/request.go | 722 + .../aws/aws-sdk-go/aws/request/request_1_7.go | 40 + .../aws/aws-sdk-go/aws/request/request_1_8.go | 37 + .../aws-sdk-go/aws/request/request_context.go | 15 + .../aws/request/request_context_1_6.go | 15 + .../aws/request/request_pagination.go | 266 + .../aws/aws-sdk-go/aws/request/retryer.go | 309 + .../aws/request/timeout_read_closer.go | 94 + .../aws/aws-sdk-go/aws/request/validation.go | 286 + .../aws/aws-sdk-go/aws/request/waiter.go | 304 + .../aws/aws-sdk-go/aws/session/credentials.go | 333 + .../aws/session/custom_transport.go | 28 + .../aws/session/custom_transport_go1.12.go | 27 + .../aws/session/custom_transport_go1.5.go | 23 + .../aws/session/custom_transport_go1.6.go | 24 + .../aws/aws-sdk-go/aws/session/doc.go | 367 + .../aws/aws-sdk-go/aws/session/env_config.go | 499 + .../aws/aws-sdk-go/aws/session/session.go | 1005 + .../aws-sdk-go/aws/session/shared_config.go | 856 + .../aws-sdk-go/aws/signer/v4/header_rules.go | 81 + .../aws/aws-sdk-go/aws/signer/v4/options.go | 7 + .../aws/signer/v4/request_context_go1.5.go | 14 + .../aws/signer/v4/request_context_go1.7.go | 14 + .../aws/aws-sdk-go/aws/signer/v4/stream.go | 63 + 
.../aws/aws-sdk-go/aws/signer/v4/uri_path.go | 25 + .../aws/aws-sdk-go/aws/signer/v4/v4.go | 857 + vendor/github.com/aws/aws-sdk-go/aws/types.go | 264 + vendor/github.com/aws/aws-sdk-go/aws/url.go | 13 + .../github.com/aws/aws-sdk-go/aws/url_1_7.go | 30 + .../github.com/aws/aws-sdk-go/aws/version.go | 8 + .../internal/context/background_go1.5.go | 41 + .../aws/aws-sdk-go/internal/ini/ast.go | 120 + .../aws-sdk-go/internal/ini/comma_token.go | 11 + .../aws-sdk-go/internal/ini/comment_token.go | 35 + .../aws/aws-sdk-go/internal/ini/doc.go | 42 + .../aws-sdk-go/internal/ini/empty_token.go | 4 + .../aws/aws-sdk-go/internal/ini/expression.go | 24 + .../aws/aws-sdk-go/internal/ini/fuzz.go | 18 + .../aws/aws-sdk-go/internal/ini/ini.go | 51 + .../aws/aws-sdk-go/internal/ini/ini_lexer.go | 165 + .../aws/aws-sdk-go/internal/ini/ini_parser.go | 350 + .../aws-sdk-go/internal/ini/literal_tokens.go | 337 + .../aws-sdk-go/internal/ini/newline_token.go | 30 + .../aws-sdk-go/internal/ini/number_helper.go | 152 + .../aws/aws-sdk-go/internal/ini/op_tokens.go | 39 + .../aws-sdk-go/internal/ini/parse_error.go | 43 + .../aws-sdk-go/internal/ini/parse_stack.go | 60 + .../aws/aws-sdk-go/internal/ini/sep_tokens.go | 41 + .../aws/aws-sdk-go/internal/ini/skipper.go | 45 + .../aws/aws-sdk-go/internal/ini/statement.go | 35 + .../aws/aws-sdk-go/internal/ini/value_util.go | 284 + .../aws/aws-sdk-go/internal/ini/visitor.go | 169 + .../aws/aws-sdk-go/internal/ini/walker.go | 25 + .../aws/aws-sdk-go/internal/ini/ws_token.go | 24 + .../internal/s3shared/arn/accesspoint_arn.go | 50 + .../aws-sdk-go/internal/s3shared/arn/arn.go | 94 + .../internal/s3shared/arn/outpost_arn.go | 126 + .../s3shared/arn/s3_object_lambda_arn.go | 15 + .../internal/s3shared/endpoint_errors.go | 202 + .../internal/s3shared/resource_request.go | 45 + .../internal/s3shared/s3err/error.go | 57 + .../aws/aws-sdk-go/internal/sdkio/byte.go | 12 + .../aws/aws-sdk-go/internal/sdkio/io_go1.6.go | 11 + .../aws/aws-sdk-go/internal/sdkio/io_go1.7.go | 13 + .../aws/aws-sdk-go/internal/sdkmath/floor.go | 16 + .../internal/sdkmath/floor_go1.9.go | 57 + .../internal/sdkrand/locked_source.go | 29 + .../aws/aws-sdk-go/internal/sdkrand/read.go | 12 + .../aws-sdk-go/internal/sdkrand/read_1_5.go | 25 + .../aws/aws-sdk-go/internal/sdkuri/path.go | 23 + .../internal/shareddefaults/ecs_container.go | 12 + .../internal/shareddefaults/shared_config.go | 46 + .../shared_config_resolve_home.go | 18 + .../shared_config_resolve_home_go1.12.go | 13 + .../aws-sdk-go/internal/strings/strings.go | 11 + .../internal/sync/singleflight/LICENSE | 27 + .../sync/singleflight/singleflight.go | 120 + .../private/checksum/content_md5.go | 53 + .../private/protocol/eventstream/debug.go | 144 + .../private/protocol/eventstream/decode.go | 216 + .../private/protocol/eventstream/encode.go | 162 + .../private/protocol/eventstream/error.go | 23 + .../eventstream/eventstreamapi/error.go | 81 + .../eventstream/eventstreamapi/reader.go | 173 + .../eventstream/eventstreamapi/shared.go | 23 + .../eventstream/eventstreamapi/signer.go | 123 + .../eventstreamapi/stream_writer.go | 129 + .../eventstream/eventstreamapi/transport.go | 10 + .../eventstreamapi/transport_go1.17.go | 19 + .../eventstream/eventstreamapi/writer.go | 63 + .../private/protocol/eventstream/header.go | 175 + .../protocol/eventstream/header_value.go | 506 + .../private/protocol/eventstream/message.go | 117 + .../aws/aws-sdk-go/private/protocol/host.go | 104 + .../private/protocol/host_prefix.go | 54 + 
.../private/protocol/idempotency.go | 75 + .../private/protocol/json/jsonutil/build.go | 309 + .../protocol/json/jsonutil/unmarshal.go | 317 + .../private/protocol/jsonrpc/jsonrpc.go | 87 + .../protocol/jsonrpc/unmarshal_error.go | 160 + .../aws-sdk-go/private/protocol/jsonvalue.go | 76 + .../aws-sdk-go/private/protocol/payload.go | 81 + .../aws-sdk-go/private/protocol/protocol.go | 49 + .../private/protocol/query/build.go | 36 + .../protocol/query/queryutil/queryutil.go | 276 + .../private/protocol/query/unmarshal.go | 39 + .../private/protocol/query/unmarshal_error.go | 70 + .../aws-sdk-go/private/protocol/rest/build.go | 353 + .../private/protocol/rest/payload.go | 54 + .../private/protocol/rest/unmarshal.go | 276 + .../private/protocol/restjson/restjson.go | 59 + .../protocol/restjson/unmarshal_error.go | 157 + .../private/protocol/restxml/restxml.go | 79 + .../aws-sdk-go/private/protocol/timestamp.go | 134 + .../aws-sdk-go/private/protocol/unmarshal.go | 27 + .../private/protocol/unmarshal_error.go | 65 + .../private/protocol/xml/xmlutil/build.go | 345 + .../private/protocol/xml/xmlutil/sort.go | 32 + .../private/protocol/xml/xmlutil/unmarshal.go | 311 + .../protocol/xml/xmlutil/xml_to_struct.go | 173 + .../aws/aws-sdk-go/service/s3/api.go | 45973 +++++++++++++++ .../aws/aws-sdk-go/service/s3/body_hash.go | 202 + .../aws-sdk-go/service/s3/bucket_location.go | 107 + .../aws-sdk-go/service/s3/customizations.go | 89 + .../aws/aws-sdk-go/service/s3/doc.go | 26 + .../aws/aws-sdk-go/service/s3/doc_custom.go | 109 + .../aws/aws-sdk-go/service/s3/endpoint.go | 298 + .../aws-sdk-go/service/s3/endpoint_builder.go | 239 + .../aws/aws-sdk-go/service/s3/errors.go | 69 + .../service/s3/host_style_bucket.go | 136 + .../service/s3/platform_handlers.go | 9 + .../service/s3/platform_handlers_go1.6.go | 29 + .../service/s3/s3iface/interface.go | 490 + .../aws-sdk-go/service/s3/s3manager/arn.go | 23 + .../aws-sdk-go/service/s3/s3manager/batch.go | 532 + .../service/s3/s3manager/bucket_region.go | 159 + .../s3/s3manager/buffered_read_seeker.go | 81 + .../s3manager/default_read_seeker_write_to.go | 8 + .../default_read_seeker_write_to_windows.go | 5 + .../s3/s3manager/default_writer_read_from.go | 8 + .../default_writer_read_from_windows.go | 5 + .../aws-sdk-go/service/s3/s3manager/doc.go | 3 + .../service/s3/s3manager/download.go | 608 + .../aws-sdk-go/service/s3/s3manager/pool.go | 252 + .../s3/s3manager/read_seeker_write_to.go | 65 + .../s3/s3manager/s3manageriface/interface.go | 46 + .../aws-sdk-go/service/s3/s3manager/upload.go | 797 + .../service/s3/s3manager/upload_input.go | 380 + .../service/s3/s3manager/writer_read_from.go | 83 + .../aws/aws-sdk-go/service/s3/service.go | 108 + .../aws/aws-sdk-go/service/s3/sse.go | 84 + .../aws-sdk-go/service/s3/statusok_error.go | 47 + .../aws-sdk-go/service/s3/unmarshal_error.go | 114 + .../aws/aws-sdk-go/service/s3/waiters.go | 214 + .../aws/aws-sdk-go/service/sso/api.go | 1367 + .../aws/aws-sdk-go/service/sso/doc.go | 45 + .../aws/aws-sdk-go/service/sso/errors.go | 44 + .../aws/aws-sdk-go/service/sso/service.go | 106 + .../service/sso/ssoiface/interface.go | 86 + .../aws/aws-sdk-go/service/ssooidc/api.go | 2406 + .../aws/aws-sdk-go/service/ssooidc/doc.go | 67 + .../aws/aws-sdk-go/service/ssooidc/errors.go | 123 + .../aws/aws-sdk-go/service/ssooidc/service.go | 106 + .../aws/aws-sdk-go/service/sts/api.go | 3553 ++ .../aws-sdk-go/service/sts/customizations.go | 11 + .../aws/aws-sdk-go/service/sts/doc.go | 31 + .../aws/aws-sdk-go/service/sts/errors.go | 84 + 
.../aws/aws-sdk-go/service/sts/service.go | 104 + .../service/sts/stsiface/interface.go | 96 + vendor/github.com/c2fo/vfs/v6/.gitignore | 2 + vendor/github.com/c2fo/vfs/v6/.golangci.yml | 765 + vendor/github.com/c2fo/vfs/v6/.gvmrc | 1 + vendor/github.com/c2fo/vfs/v6/.mockery.yaml | 15 + vendor/github.com/c2fo/vfs/v6/CHANGELOG.md | 396 + .../github.com/c2fo/vfs/v6/CODE_OF_CONDUCT.md | 128 + vendor/github.com/c2fo/vfs/v6/License.md | 21 + vendor/github.com/c2fo/vfs/v6/README.md | 494 + .../github.com/c2fo/vfs/v6/backend/all/all.go | 12 + .../c2fo/vfs/v6/backend/azure/client.go | 271 + .../c2fo/vfs/v6/backend/azure/doc.go | 63 + .../c2fo/vfs/v6/backend/azure/file.go | 384 + .../c2fo/vfs/v6/backend/azure/fileSystem.go | 156 + .../c2fo/vfs/v6/backend/azure/location.go | 206 + .../c2fo/vfs/v6/backend/azure/mock_client.go | 97 + .../c2fo/vfs/v6/backend/azure/mock_token.go | 15 + .../c2fo/vfs/v6/backend/azure/options.go | 91 + .../c2fo/vfs/v6/backend/azure/properties.go | 29 + .../c2fo/vfs/v6/backend/azure/token.go | 55 + .../github.com/c2fo/vfs/v6/backend/backend.go | 56 + vendor/github.com/c2fo/vfs/v6/backend/doc.go | 76 + .../c2fo/vfs/v6/backend/ftp/dataconn.go | 220 + .../github.com/c2fo/vfs/v6/backend/ftp/doc.go | 152 + .../c2fo/vfs/v6/backend/ftp/errors.go | 9 + .../c2fo/vfs/v6/backend/ftp/file.go | 456 + .../c2fo/vfs/v6/backend/ftp/fileSystem.go | 151 + .../c2fo/vfs/v6/backend/ftp/location.go | 232 + .../c2fo/vfs/v6/backend/ftp/options.go | 188 + .../c2fo/vfs/v6/backend/ftp/types/types.go | 50 + .../vfs/v6/backend/gs/bucketHandleWrapper.go | 83 + .../github.com/c2fo/vfs/v6/backend/gs/doc.go | 74 + .../github.com/c2fo/vfs/v6/backend/gs/file.go | 757 + .../c2fo/vfs/v6/backend/gs/fileSystem.go | 145 + .../c2fo/vfs/v6/backend/gs/location.go | 217 + .../vfs/v6/backend/gs/objectHandleWrapper.go | 161 + .../c2fo/vfs/v6/backend/gs/options.go | 36 + .../github.com/c2fo/vfs/v6/backend/helpers.go | 23 + .../github.com/c2fo/vfs/v6/backend/mem/doc.go | 23 + .../c2fo/vfs/v6/backend/mem/file.go | 549 + .../c2fo/vfs/v6/backend/mem/fileSystem.go | 181 + .../c2fo/vfs/v6/backend/mem/location.go | 232 + .../vfs/v6/backend/mem/readwriteseeker.go | 81 + .../github.com/c2fo/vfs/v6/backend/os/doc.go | 31 + .../github.com/c2fo/vfs/v6/backend/os/file.go | 497 + .../c2fo/vfs/v6/backend/os/fileSystem.go | 57 + .../c2fo/vfs/v6/backend/os/location.go | 188 + .../github.com/c2fo/vfs/v6/backend/s3/doc.go | 92 + .../github.com/c2fo/vfs/v6/backend/s3/file.go | 860 + .../c2fo/vfs/v6/backend/s3/fileSystem.go | 130 + .../c2fo/vfs/v6/backend/s3/location.go | 212 + .../c2fo/vfs/v6/backend/s3/options.go | 138 + .../c2fo/vfs/v6/backend/sftp/doc.go | 202 + .../c2fo/vfs/v6/backend/sftp/file.go | 570 + .../c2fo/vfs/v6/backend/sftp/fileSystem.go | 200 + .../c2fo/vfs/v6/backend/sftp/location.go | 222 + .../c2fo/vfs/v6/backend/sftp/options.go | 327 + vendor/github.com/c2fo/vfs/v6/doc.go | 142 + vendor/github.com/c2fo/vfs/v6/errors.go | 21 + vendor/github.com/c2fo/vfs/v6/mocks/File.go | 959 + .../c2fo/vfs/v6/mocks/FileSystem.go | 290 + .../github.com/c2fo/vfs/v6/mocks/Location.go | 717 + .../github.com/c2fo/vfs/v6/mocks/Options.go | 32 + vendor/github.com/c2fo/vfs/v6/mocks/Retry.go | 78 + .../c2fo/vfs/v6/mocks/StringFile.go | 89 + .../v6/options/delete/deleteAllVersions.go | 19 + .../github.com/c2fo/vfs/v6/options/options.go | 18 + .../github.com/c2fo/vfs/v6/utils/authority.go | 225 + vendor/github.com/c2fo/vfs/v6/utils/errors.go | 23 + vendor/github.com/c2fo/vfs/v6/utils/utils.go | 264 + vendor/github.com/c2fo/vfs/v6/vfs.go | 247 + 
.../github.com/c2fo/vfs/v6/vfssimple/doc.go | 114 + .../c2fo/vfs/v6/vfssimple/vfssimple.go | 123 + .../github.com/golang-jwt/jwt/v4/.gitignore | 4 + vendor/github.com/golang-jwt/jwt/v4/LICENSE | 9 + .../golang-jwt/jwt/v4/MIGRATION_GUIDE.md | 22 + vendor/github.com/golang-jwt/jwt/v4/README.md | 138 + .../github.com/golang-jwt/jwt/v4/SECURITY.md | 19 + .../golang-jwt/jwt/v4/VERSION_HISTORY.md | 135 + vendor/github.com/golang-jwt/jwt/v4/claims.go | 269 + vendor/github.com/golang-jwt/jwt/v4/doc.go | 4 + vendor/github.com/golang-jwt/jwt/v4/ecdsa.go | 142 + .../golang-jwt/jwt/v4/ecdsa_utils.go | 69 + .../github.com/golang-jwt/jwt/v4/ed25519.go | 85 + .../golang-jwt/jwt/v4/ed25519_utils.go | 64 + vendor/github.com/golang-jwt/jwt/v4/errors.go | 112 + vendor/github.com/golang-jwt/jwt/v4/hmac.go | 95 + .../golang-jwt/jwt/v4/map_claims.go | 151 + vendor/github.com/golang-jwt/jwt/v4/none.go | 52 + vendor/github.com/golang-jwt/jwt/v4/parser.go | 177 + .../golang-jwt/jwt/v4/parser_option.go | 29 + vendor/github.com/golang-jwt/jwt/v4/rsa.go | 101 + .../github.com/golang-jwt/jwt/v4/rsa_pss.go | 143 + .../github.com/golang-jwt/jwt/v4/rsa_utils.go | 105 + .../golang-jwt/jwt/v4/signing_method.go | 46 + .../golang-jwt/jwt/v4/staticcheck.conf | 1 + vendor/github.com/golang-jwt/jwt/v4/token.go | 143 + vendor/github.com/golang-jwt/jwt/v4/types.go | 145 + vendor/github.com/hashicorp/errwrap/LICENSE | 354 + vendor/github.com/hashicorp/errwrap/README.md | 89 + .../github.com/hashicorp/errwrap/errwrap.go | 178 + .../hashicorp/go-multierror/LICENSE | 353 + .../hashicorp/go-multierror/Makefile | 31 + .../hashicorp/go-multierror/README.md | 150 + .../hashicorp/go-multierror/append.go | 43 + .../hashicorp/go-multierror/flatten.go | 26 + .../hashicorp/go-multierror/format.go | 27 + .../hashicorp/go-multierror/group.go | 38 + .../hashicorp/go-multierror/multierror.go | 121 + .../hashicorp/go-multierror/prefix.go | 37 + .../hashicorp/go-multierror/sort.go | 16 + vendor/github.com/jlaffaye/ftp/LICENSE | 13 + vendor/github.com/jlaffaye/ftp/README.md | 63 + vendor/github.com/jlaffaye/ftp/debug.go | 37 + vendor/github.com/jlaffaye/ftp/ftp.go | 1160 + vendor/github.com/jlaffaye/ftp/parse.go | 277 + vendor/github.com/jlaffaye/ftp/scanner.go | 58 + vendor/github.com/jlaffaye/ftp/status.go | 119 + vendor/github.com/jlaffaye/ftp/walker.go | 98 + .../jmespath/go-jmespath/.gitignore | 4 + .../jmespath/go-jmespath/.travis.yml | 28 + .../github.com/jmespath/go-jmespath/LICENSE | 13 + .../github.com/jmespath/go-jmespath/Makefile | 51 + .../github.com/jmespath/go-jmespath/README.md | 87 + vendor/github.com/jmespath/go-jmespath/api.go | 49 + .../go-jmespath/astnodetype_string.go | 16 + .../jmespath/go-jmespath/functions.go | 842 + .../jmespath/go-jmespath/interpreter.go | 418 + .../github.com/jmespath/go-jmespath/lexer.go | 420 + .../github.com/jmespath/go-jmespath/parser.go | 603 + .../jmespath/go-jmespath/toktype_string.go | 16 + .../github.com/jmespath/go-jmespath/util.go | 185 + vendor/github.com/kr/fs/LICENSE | 27 + vendor/github.com/kr/fs/Readme | 3 + vendor/github.com/kr/fs/filesystem.go | 36 + vendor/github.com/kr/fs/walk.go | 95 + vendor/github.com/mattn/go-ieproxy/.gitignore | 1 + vendor/github.com/mattn/go-ieproxy/LICENSE | 23 + vendor/github.com/mattn/go-ieproxy/README.md | 51 + vendor/github.com/mattn/go-ieproxy/ieproxy.go | 56 + .../mattn/go-ieproxy/ieproxy_darwin.go | 126 + .../mattn/go-ieproxy/ieproxy_ios.go | 15 + .../mattn/go-ieproxy/ieproxy_unix.go | 16 + .../mattn/go-ieproxy/ieproxy_windows.go | 219 + 
.../mattn/go-ieproxy/kernel32_data_windows.go | 19 + .../github.com/mattn/go-ieproxy/pac_darwin.go | 141 + .../github.com/mattn/go-ieproxy/pac_unix.go | 9 + .../mattn/go-ieproxy/pac_windows.go | 72 + .../mattn/go-ieproxy/proxy_middleman.go | 11 + .../go-ieproxy/proxy_middleman_darwin.go | 46 + .../mattn/go-ieproxy/proxy_middleman_ios.go | 14 + .../mattn/go-ieproxy/proxy_middleman_unix.go | 14 + .../go-ieproxy/proxy_middleman_windows.go | 52 + vendor/github.com/mattn/go-ieproxy/utils.go | 23 + .../mattn/go-ieproxy/winhttp_data_windows.go | 51 + vendor/github.com/pkg/sftp/.gitignore | 10 + vendor/github.com/pkg/sftp/CONTRIBUTORS | 3 + vendor/github.com/pkg/sftp/LICENSE | 9 + vendor/github.com/pkg/sftp/Makefile | 27 + vendor/github.com/pkg/sftp/README.md | 44 + vendor/github.com/pkg/sftp/allocator.go | 96 + vendor/github.com/pkg/sftp/attrs.go | 121 + vendor/github.com/pkg/sftp/attrs_stubs.go | 12 + vendor/github.com/pkg/sftp/attrs_unix.go | 17 + vendor/github.com/pkg/sftp/client.go | 2030 + vendor/github.com/pkg/sftp/conn.go | 183 + vendor/github.com/pkg/sftp/debug.go | 10 + vendor/github.com/pkg/sftp/fuzz.go | 23 + .../internal/encoding/ssh/filexfer/attrs.go | 296 + .../internal/encoding/ssh/filexfer/buffer.go | 340 + .../encoding/ssh/filexfer/extended_packets.go | 143 + .../encoding/ssh/filexfer/extensions.go | 43 + .../encoding/ssh/filexfer/filexfer.go | 54 + .../sftp/internal/encoding/ssh/filexfer/fx.go | 147 + .../internal/encoding/ssh/filexfer/fxp.go | 169 + .../encoding/ssh/filexfer/handle_packets.go | 230 + .../encoding/ssh/filexfer/init_packets.go | 99 + .../encoding/ssh/filexfer/open_packets.go | 86 + .../internal/encoding/ssh/filexfer/packets.go | 273 + .../encoding/ssh/filexfer/path_packets.go | 362 + .../encoding/ssh/filexfer/permissions.go | 114 + .../encoding/ssh/filexfer/response_packets.go | 230 + vendor/github.com/pkg/sftp/ls_formatting.go | 88 + vendor/github.com/pkg/sftp/ls_plan9.go | 22 + vendor/github.com/pkg/sftp/ls_stub.go | 12 + vendor/github.com/pkg/sftp/ls_unix.go | 24 + vendor/github.com/pkg/sftp/match.go | 137 + vendor/github.com/pkg/sftp/packet-manager.go | 216 + vendor/github.com/pkg/sftp/packet-typing.go | 135 + vendor/github.com/pkg/sftp/packet.go | 1295 + vendor/github.com/pkg/sftp/pool.go | 79 + vendor/github.com/pkg/sftp/release.go | 6 + vendor/github.com/pkg/sftp/request-attrs.go | 63 + vendor/github.com/pkg/sftp/request-errors.go | 54 + vendor/github.com/pkg/sftp/request-example.go | 647 + .../github.com/pkg/sftp/request-interfaces.go | 157 + vendor/github.com/pkg/sftp/request-plan9.go | 16 + vendor/github.com/pkg/sftp/request-readme.md | 53 + vendor/github.com/pkg/sftp/request-server.go | 337 + vendor/github.com/pkg/sftp/request-unix.go | 24 + vendor/github.com/pkg/sftp/request.go | 653 + vendor/github.com/pkg/sftp/request_windows.go | 13 + vendor/github.com/pkg/sftp/server.go | 634 + vendor/github.com/pkg/sftp/server_plan9.go | 27 + .../pkg/sftp/server_statvfs_darwin.go | 21 + .../pkg/sftp/server_statvfs_impl.go | 30 + .../pkg/sftp/server_statvfs_linux.go | 23 + .../pkg/sftp/server_statvfs_plan9.go | 13 + .../pkg/sftp/server_statvfs_stubs.go | 16 + vendor/github.com/pkg/sftp/server_unix.go | 16 + vendor/github.com/pkg/sftp/server_windows.go | 39 + vendor/github.com/pkg/sftp/sftp.go | 258 + vendor/github.com/pkg/sftp/stat_plan9.go | 103 + vendor/github.com/pkg/sftp/stat_posix.go | 124 + vendor/github.com/pkg/sftp/syscall_fixed.go | 10 + vendor/github.com/pkg/sftp/syscall_good.go | 10 + .../github.com/stretchr/objx/.codeclimate.yml | 21 + 
vendor/github.com/stretchr/objx/.gitignore | 11 + vendor/github.com/stretchr/objx/LICENSE | 22 + vendor/github.com/stretchr/objx/README.md | 80 + vendor/github.com/stretchr/objx/Taskfile.yml | 27 + vendor/github.com/stretchr/objx/accessors.go | 197 + .../github.com/stretchr/objx/conversions.go | 280 + vendor/github.com/stretchr/objx/doc.go | 66 + vendor/github.com/stretchr/objx/map.go | 214 + vendor/github.com/stretchr/objx/mutations.go | 77 + vendor/github.com/stretchr/objx/security.go | 12 + vendor/github.com/stretchr/objx/tests.go | 17 + .../github.com/stretchr/objx/type_specific.go | 346 + .../stretchr/objx/type_specific_codegen.go | 2261 + vendor/github.com/stretchr/objx/value.go | 159 + .../github.com/stretchr/testify/mock/doc.go | 44 + .../github.com/stretchr/testify/mock/mock.go | 1241 + vendor/golang.org/x/crypto/blowfish/block.go | 159 + vendor/golang.org/x/crypto/blowfish/cipher.go | 99 + vendor/golang.org/x/crypto/blowfish/const.go | 199 + .../x/crypto/curve25519/curve25519.go | 90 + .../golang.org/x/crypto/pkcs12/bmp-string.go | 50 + vendor/golang.org/x/crypto/pkcs12/crypto.go | 131 + vendor/golang.org/x/crypto/pkcs12/errors.go | 23 + .../x/crypto/pkcs12/internal/rc2/rc2.go | 268 + vendor/golang.org/x/crypto/pkcs12/mac.go | 45 + vendor/golang.org/x/crypto/pkcs12/pbkdf.go | 170 + vendor/golang.org/x/crypto/pkcs12/pkcs12.go | 360 + vendor/golang.org/x/crypto/pkcs12/safebags.go | 57 + vendor/golang.org/x/crypto/ssh/buffer.go | 97 + vendor/golang.org/x/crypto/ssh/certs.go | 611 + vendor/golang.org/x/crypto/ssh/channel.go | 645 + vendor/golang.org/x/crypto/ssh/cipher.go | 789 + vendor/golang.org/x/crypto/ssh/client.go | 282 + vendor/golang.org/x/crypto/ssh/client_auth.go | 791 + vendor/golang.org/x/crypto/ssh/common.go | 476 + vendor/golang.org/x/crypto/ssh/connection.go | 143 + vendor/golang.org/x/crypto/ssh/doc.go | 23 + vendor/golang.org/x/crypto/ssh/handshake.go | 806 + .../ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go | 93 + vendor/golang.org/x/crypto/ssh/kex.go | 786 + vendor/golang.org/x/crypto/ssh/keys.go | 1778 + .../x/crypto/ssh/knownhosts/knownhosts.go | 540 + vendor/golang.org/x/crypto/ssh/mac.go | 68 + vendor/golang.org/x/crypto/ssh/messages.go | 891 + vendor/golang.org/x/crypto/ssh/mux.go | 357 + vendor/golang.org/x/crypto/ssh/server.go | 902 + vendor/golang.org/x/crypto/ssh/session.go | 647 + vendor/golang.org/x/crypto/ssh/ssh_gss.go | 139 + vendor/golang.org/x/crypto/ssh/streamlocal.go | 116 + vendor/golang.org/x/crypto/ssh/tcpip.go | 509 + vendor/golang.org/x/crypto/ssh/transport.go | 380 + vendor/golang.org/x/net/context/context.go | 56 + vendor/golang.org/x/net/context/go17.go | 72 + vendor/golang.org/x/net/context/go19.go | 20 + vendor/golang.org/x/net/context/pre_go17.go | 300 + vendor/golang.org/x/net/context/pre_go19.go | 109 + .../golang.org/x/net/http/httpproxy/proxy.go | 367 + vendor/modules.txt | 141 +- 636 files changed, 222048 insertions(+), 7 deletions(-) create mode 100644 vendor/github.com/Azure/azure-pipeline-go/LICENSE create mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go create mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go create mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go create mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go create mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go create mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go create mode 100644 
vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go create mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go create mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go create mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/LICENSE create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/bytes_writer.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/common_utils.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/request_common.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/section_writer.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_util_validate.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_uuid.go create mode 100644 
vendor/github.com/Azure/azure-storage-blob-go/azblob/zt_doc.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_responder_policy.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_validation.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go create mode 100644 vendor/github.com/Azure/go-autorest/.gitignore create mode 100644 vendor/github.com/Azure/go-autorest/CHANGELOG.md create mode 100644 vendor/github.com/Azure/go-autorest/GNUmakefile create mode 100644 vendor/github.com/Azure/go-autorest/Gopkg.lock create mode 100644 vendor/github.com/Azure/go-autorest/Gopkg.toml create mode 100644 vendor/github.com/Azure/go-autorest/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/README.md create mode 100644 vendor/github.com/Azure/go-autorest/autorest/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/README.md create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/config.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/persist.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/sender.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/token.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/version.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/autorest.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/async.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/azure.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/environments.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/rp.go create mode 100644 
vendor/github.com/Azure/go-autorest/autorest/client.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/date.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/time.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/utility.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/error.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/preparer.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/responder.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/sender.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/utility.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/version.go create mode 100644 vendor/github.com/Azure/go-autorest/azure-pipelines.yml create mode 100644 vendor/github.com/Azure/go-autorest/doc.go create mode 100644 vendor/github.com/Azure/go-autorest/logger/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go create mode 100644 vendor/github.com/Azure/go-autorest/logger/logger.go create mode 100644 vendor/github.com/Azure/go-autorest/tracing/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go create mode 100644 vendor/github.com/Azure/go-autorest/tracing/tracing.go create mode 100644 vendor/github.com/aws/aws-sdk-go/LICENSE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/NOTICE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/logger.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/convert_types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/logger.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/validation.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/session.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/version.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport_go1.17.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/arn.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/pool.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/sse.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sso/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sso/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sso/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sso/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go create mode 100644 vendor/github.com/c2fo/vfs/v6/.gitignore create mode 100644 vendor/github.com/c2fo/vfs/v6/.golangci.yml create mode 100644 vendor/github.com/c2fo/vfs/v6/.gvmrc create mode 100644 vendor/github.com/c2fo/vfs/v6/.mockery.yaml create mode 100644 vendor/github.com/c2fo/vfs/v6/CHANGELOG.md create mode 100644 vendor/github.com/c2fo/vfs/v6/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/c2fo/vfs/v6/License.md create mode 100644 vendor/github.com/c2fo/vfs/v6/README.md create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/all/all.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/azure/client.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/azure/doc.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/azure/file.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/azure/fileSystem.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/azure/location.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/azure/mock_client.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/azure/mock_token.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/azure/options.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/azure/properties.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/azure/token.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/backend.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/doc.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/ftp/dataconn.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/ftp/doc.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/ftp/errors.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/ftp/file.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/ftp/fileSystem.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/ftp/location.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/ftp/options.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/ftp/types/types.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/gs/bucketHandleWrapper.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/gs/doc.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/gs/file.go create mode 100644 
vendor/github.com/c2fo/vfs/v6/backend/gs/fileSystem.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/gs/location.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/gs/objectHandleWrapper.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/gs/options.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/helpers.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/mem/doc.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/mem/file.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/mem/fileSystem.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/mem/location.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/mem/readwriteseeker.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/os/doc.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/os/file.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/os/fileSystem.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/os/location.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/s3/doc.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/s3/file.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/s3/fileSystem.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/s3/location.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/s3/options.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/sftp/doc.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/sftp/file.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/sftp/fileSystem.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/sftp/location.go create mode 100644 vendor/github.com/c2fo/vfs/v6/backend/sftp/options.go create mode 100644 vendor/github.com/c2fo/vfs/v6/doc.go create mode 100644 vendor/github.com/c2fo/vfs/v6/errors.go create mode 100644 vendor/github.com/c2fo/vfs/v6/mocks/File.go create mode 100644 vendor/github.com/c2fo/vfs/v6/mocks/FileSystem.go create mode 100644 vendor/github.com/c2fo/vfs/v6/mocks/Location.go create mode 100644 vendor/github.com/c2fo/vfs/v6/mocks/Options.go create mode 100644 vendor/github.com/c2fo/vfs/v6/mocks/Retry.go create mode 100644 vendor/github.com/c2fo/vfs/v6/mocks/StringFile.go create mode 100644 vendor/github.com/c2fo/vfs/v6/options/delete/deleteAllVersions.go create mode 100644 vendor/github.com/c2fo/vfs/v6/options/options.go create mode 100644 vendor/github.com/c2fo/vfs/v6/utils/authority.go create mode 100644 vendor/github.com/c2fo/vfs/v6/utils/errors.go create mode 100644 vendor/github.com/c2fo/vfs/v6/utils/utils.go create mode 100644 vendor/github.com/c2fo/vfs/v6/vfs.go create mode 100644 vendor/github.com/c2fo/vfs/v6/vfssimple/doc.go create mode 100644 vendor/github.com/c2fo/vfs/v6/vfssimple/vfssimple.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/.gitignore create mode 100644 vendor/github.com/golang-jwt/jwt/v4/LICENSE create mode 100644 vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md create mode 100644 vendor/github.com/golang-jwt/jwt/v4/README.md create mode 100644 vendor/github.com/golang-jwt/jwt/v4/SECURITY.md create mode 100644 vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md create mode 100644 vendor/github.com/golang-jwt/jwt/v4/claims.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/doc.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/ecdsa.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/ed25519.go create mode 100644 
vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/errors.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/hmac.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/map_claims.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/none.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/parser.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/parser_option.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/rsa.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/signing_method.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf create mode 100644 vendor/github.com/golang-jwt/jwt/v4/token.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/types.go create mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE create mode 100644 vendor/github.com/hashicorp/errwrap/README.md create mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go create mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE create mode 100644 vendor/github.com/hashicorp/go-multierror/Makefile create mode 100644 vendor/github.com/hashicorp/go-multierror/README.md create mode 100644 vendor/github.com/hashicorp/go-multierror/append.go create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go create mode 100644 vendor/github.com/hashicorp/go-multierror/format.go create mode 100644 vendor/github.com/hashicorp/go-multierror/group.go create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go create mode 100644 vendor/github.com/hashicorp/go-multierror/sort.go create mode 100644 vendor/github.com/jlaffaye/ftp/LICENSE create mode 100644 vendor/github.com/jlaffaye/ftp/README.md create mode 100644 vendor/github.com/jlaffaye/ftp/debug.go create mode 100644 vendor/github.com/jlaffaye/ftp/ftp.go create mode 100644 vendor/github.com/jlaffaye/ftp/parse.go create mode 100644 vendor/github.com/jlaffaye/ftp/scanner.go create mode 100644 vendor/github.com/jlaffaye/ftp/status.go create mode 100644 vendor/github.com/jlaffaye/ftp/walker.go create mode 100644 vendor/github.com/jmespath/go-jmespath/.gitignore create mode 100644 vendor/github.com/jmespath/go-jmespath/.travis.yml create mode 100644 vendor/github.com/jmespath/go-jmespath/LICENSE create mode 100644 vendor/github.com/jmespath/go-jmespath/Makefile create mode 100644 vendor/github.com/jmespath/go-jmespath/README.md create mode 100644 vendor/github.com/jmespath/go-jmespath/api.go create mode 100644 vendor/github.com/jmespath/go-jmespath/astnodetype_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/functions.go create mode 100644 vendor/github.com/jmespath/go-jmespath/interpreter.go create mode 100644 vendor/github.com/jmespath/go-jmespath/lexer.go create mode 100644 vendor/github.com/jmespath/go-jmespath/parser.go create mode 100644 vendor/github.com/jmespath/go-jmespath/toktype_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/util.go create mode 100644 vendor/github.com/kr/fs/LICENSE create mode 100644 vendor/github.com/kr/fs/Readme create mode 100644 vendor/github.com/kr/fs/filesystem.go create mode 100644 vendor/github.com/kr/fs/walk.go create mode 100644 vendor/github.com/mattn/go-ieproxy/.gitignore create mode 100644 vendor/github.com/mattn/go-ieproxy/LICENSE create mode 
100644 vendor/github.com/mattn/go-ieproxy/README.md create mode 100644 vendor/github.com/mattn/go-ieproxy/ieproxy.go create mode 100644 vendor/github.com/mattn/go-ieproxy/ieproxy_darwin.go create mode 100644 vendor/github.com/mattn/go-ieproxy/ieproxy_ios.go create mode 100644 vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go create mode 100644 vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go create mode 100644 vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go create mode 100644 vendor/github.com/mattn/go-ieproxy/pac_darwin.go create mode 100644 vendor/github.com/mattn/go-ieproxy/pac_unix.go create mode 100644 vendor/github.com/mattn/go-ieproxy/pac_windows.go create mode 100644 vendor/github.com/mattn/go-ieproxy/proxy_middleman.go create mode 100644 vendor/github.com/mattn/go-ieproxy/proxy_middleman_darwin.go create mode 100644 vendor/github.com/mattn/go-ieproxy/proxy_middleman_ios.go create mode 100644 vendor/github.com/mattn/go-ieproxy/proxy_middleman_unix.go create mode 100644 vendor/github.com/mattn/go-ieproxy/proxy_middleman_windows.go create mode 100644 vendor/github.com/mattn/go-ieproxy/utils.go create mode 100644 vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go create mode 100644 vendor/github.com/pkg/sftp/.gitignore create mode 100644 vendor/github.com/pkg/sftp/CONTRIBUTORS create mode 100644 vendor/github.com/pkg/sftp/LICENSE create mode 100644 vendor/github.com/pkg/sftp/Makefile create mode 100644 vendor/github.com/pkg/sftp/README.md create mode 100644 vendor/github.com/pkg/sftp/allocator.go create mode 100644 vendor/github.com/pkg/sftp/attrs.go create mode 100644 vendor/github.com/pkg/sftp/attrs_stubs.go create mode 100644 vendor/github.com/pkg/sftp/attrs_unix.go create mode 100644 vendor/github.com/pkg/sftp/client.go create mode 100644 vendor/github.com/pkg/sftp/conn.go create mode 100644 vendor/github.com/pkg/sftp/debug.go create mode 100644 vendor/github.com/pkg/sftp/fuzz.go create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go create mode 100644 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go create mode 100644 vendor/github.com/pkg/sftp/ls_formatting.go create mode 100644 vendor/github.com/pkg/sftp/ls_plan9.go create mode 100644 vendor/github.com/pkg/sftp/ls_stub.go create mode 100644 vendor/github.com/pkg/sftp/ls_unix.go create mode 100644 vendor/github.com/pkg/sftp/match.go create mode 100644 
vendor/github.com/pkg/sftp/packet-manager.go create mode 100644 vendor/github.com/pkg/sftp/packet-typing.go create mode 100644 vendor/github.com/pkg/sftp/packet.go create mode 100644 vendor/github.com/pkg/sftp/pool.go create mode 100644 vendor/github.com/pkg/sftp/release.go create mode 100644 vendor/github.com/pkg/sftp/request-attrs.go create mode 100644 vendor/github.com/pkg/sftp/request-errors.go create mode 100644 vendor/github.com/pkg/sftp/request-example.go create mode 100644 vendor/github.com/pkg/sftp/request-interfaces.go create mode 100644 vendor/github.com/pkg/sftp/request-plan9.go create mode 100644 vendor/github.com/pkg/sftp/request-readme.md create mode 100644 vendor/github.com/pkg/sftp/request-server.go create mode 100644 vendor/github.com/pkg/sftp/request-unix.go create mode 100644 vendor/github.com/pkg/sftp/request.go create mode 100644 vendor/github.com/pkg/sftp/request_windows.go create mode 100644 vendor/github.com/pkg/sftp/server.go create mode 100644 vendor/github.com/pkg/sftp/server_plan9.go create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_darwin.go create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_impl.go create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_linux.go create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_plan9.go create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_stubs.go create mode 100644 vendor/github.com/pkg/sftp/server_unix.go create mode 100644 vendor/github.com/pkg/sftp/server_windows.go create mode 100644 vendor/github.com/pkg/sftp/sftp.go create mode 100644 vendor/github.com/pkg/sftp/stat_plan9.go create mode 100644 vendor/github.com/pkg/sftp/stat_posix.go create mode 100644 vendor/github.com/pkg/sftp/syscall_fixed.go create mode 100644 vendor/github.com/pkg/sftp/syscall_good.go create mode 100644 vendor/github.com/stretchr/objx/.codeclimate.yml create mode 100644 vendor/github.com/stretchr/objx/.gitignore create mode 100644 vendor/github.com/stretchr/objx/LICENSE create mode 100644 vendor/github.com/stretchr/objx/README.md create mode 100644 vendor/github.com/stretchr/objx/Taskfile.yml create mode 100644 vendor/github.com/stretchr/objx/accessors.go create mode 100644 vendor/github.com/stretchr/objx/conversions.go create mode 100644 vendor/github.com/stretchr/objx/doc.go create mode 100644 vendor/github.com/stretchr/objx/map.go create mode 100644 vendor/github.com/stretchr/objx/mutations.go create mode 100644 vendor/github.com/stretchr/objx/security.go create mode 100644 vendor/github.com/stretchr/objx/tests.go create mode 100644 vendor/github.com/stretchr/objx/type_specific.go create mode 100644 vendor/github.com/stretchr/objx/type_specific_codegen.go create mode 100644 vendor/github.com/stretchr/objx/value.go create mode 100644 vendor/github.com/stretchr/testify/mock/doc.go create mode 100644 vendor/github.com/stretchr/testify/mock/mock.go create mode 100644 vendor/golang.org/x/crypto/blowfish/block.go create mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go create mode 100644 vendor/golang.org/x/crypto/blowfish/const.go create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/bmp-string.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/crypto.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/errors.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/mac.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/pbkdf.go create mode 
100644 vendor/golang.org/x/crypto/pkcs12/pkcs12.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/safebags.go create mode 100644 vendor/golang.org/x/crypto/ssh/buffer.go create mode 100644 vendor/golang.org/x/crypto/ssh/certs.go create mode 100644 vendor/golang.org/x/crypto/ssh/channel.go create mode 100644 vendor/golang.org/x/crypto/ssh/cipher.go create mode 100644 vendor/golang.org/x/crypto/ssh/client.go create mode 100644 vendor/golang.org/x/crypto/ssh/client_auth.go create mode 100644 vendor/golang.org/x/crypto/ssh/common.go create mode 100644 vendor/golang.org/x/crypto/ssh/connection.go create mode 100644 vendor/golang.org/x/crypto/ssh/doc.go create mode 100644 vendor/golang.org/x/crypto/ssh/handshake.go create mode 100644 vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go create mode 100644 vendor/golang.org/x/crypto/ssh/kex.go create mode 100644 vendor/golang.org/x/crypto/ssh/keys.go create mode 100644 vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go create mode 100644 vendor/golang.org/x/crypto/ssh/mac.go create mode 100644 vendor/golang.org/x/crypto/ssh/messages.go create mode 100644 vendor/golang.org/x/crypto/ssh/mux.go create mode 100644 vendor/golang.org/x/crypto/ssh/server.go create mode 100644 vendor/golang.org/x/crypto/ssh/session.go create mode 100644 vendor/golang.org/x/crypto/ssh/ssh_gss.go create mode 100644 vendor/golang.org/x/crypto/ssh/streamlocal.go create mode 100644 vendor/golang.org/x/crypto/ssh/tcpip.go create mode 100644 vendor/golang.org/x/crypto/ssh/transport.go create mode 100644 vendor/golang.org/x/net/context/context.go create mode 100644 vendor/golang.org/x/net/context/go17.go create mode 100644 vendor/golang.org/x/net/context/go19.go create mode 100644 vendor/golang.org/x/net/context/pre_go17.go create mode 100644 vendor/golang.org/x/net/context/pre_go19.go create mode 100644 vendor/golang.org/x/net/http/httpproxy/proxy.go diff --git a/go.mod b/go.mod index 155938457..92834af29 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/openshift-pipelines/tekton-caches -go 1.22 +go 1.22.0 + +toolchain go1.23.1 require ( cloud.google.com/go/storage v1.43.0 @@ -8,6 +10,7 @@ require ( github.com/aws/aws-sdk-go-v2 v1.31.0 github.com/aws/aws-sdk-go-v2/config v1.27.39 github.com/aws/aws-sdk-go-v2/service/s3 v1.64.0 + github.com/c2fo/vfs/v6 v6.19.0 github.com/codeclysm/extract/v3 v3.1.1 github.com/google/go-containerregistry v0.20.2 github.com/moby/patternmatcher v0.6.0 @@ -23,6 +26,15 @@ require ( cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect cloud.google.com/go/compute/metadata v0.5.2 // indirect cloud.google.com/go/iam v1.2.0 // indirect + github.com/Azure/azure-pipeline-go v0.2.3 // indirect + github.com/Azure/azure-storage-blob-go v0.15.0 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest v0.11.29 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.24 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/logger v0.2.1 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/aws/aws-sdk-go v1.55.5 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.37 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 // indirect @@ -46,6 +58,7 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // 
indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/s2a-go v0.1.8 // indirect @@ -53,18 +66,26 @@ require ( github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/h2non/filetype v1.1.3 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jlaffaye/ftp v0.2.1-0.20240214224549-4edb16bfcd0f // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5 // indirect github.com/juju/loggo v1.0.0 // indirect github.com/klauspost/compress v1.16.5 // indirect - github.com/kr/pretty v0.3.1 // indirect + github.com/kr/fs v0.1.0 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/mattn/go-ieproxy v0.0.12 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc3 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/pkg/sftp v1.13.6 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/sirupsen/logrus v1.9.1 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/ulikunitz/xz v0.5.11 // indirect github.com/vbatts/tar-split v0.11.3 // indirect go.opencensus.io v0.24.0 // indirect diff --git a/go.sum b/go.sum index 21f0ee70f..4c81ab272 100644 --- a/go.sum +++ b/go.sum @@ -11,14 +11,39 @@ cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8= cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q= cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI= cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts= +cloud.google.com/go/pubsub v1.42.0 h1:PVTbzorLryFL5ue8esTS2BfehUs0ahyNOY9qcd+HMOs= +cloud.google.com/go/pubsub v1.42.0/go.mod h1:KADJ6s4MbTwhXmse/50SebEhE4SmUwHi48z3/dHar1Y= cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= +github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= +github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= +github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk= +github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= +github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= +github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4= 
+github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= +github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/GoogleCloudPlatform/cloud-builders/gcs-fetcher v0.0.0-20240628152042-7b3987f0b238 h1:XninrdTuJS8pmxkHXI7IzlYa8Y0/4jgI7icPqVuOpEc= github.com/GoogleCloudPlatform/cloud-builders/gcs-fetcher v0.0.0-20240628152042-7b3987f0b238/go.mod h1:jeAvAnTC8i05Y+atCvf+JKavdCvsouJHMhRMngOnjQE= github.com/arduino/go-paths-helper v1.2.0 h1:qDW93PR5IZUN/jzO4rCtexiwF8P4OIcOmcSgAYLZfY4= github.com/arduino/go-paths-helper v1.2.0/go.mod h1:HpxtKph+g238EJHq4geEPv9p+gl3v5YYu35Yb+w31Ck= +github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v1.31.0 h1:3V05LbxTSItI5kUqNwhJrrrY1BAXxXt0sN0l72QmG5U= github.com/aws/aws-sdk-go-v2 v1.31.0/go.mod h1:ztolYtaEUtdpf9Wftr31CJfLVjOnD/CVRkKOOYgF8hA= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.5 h1:xDAuZTn4IMm8o1LnBZvmrL8JA1io4o3YWNXgohbf20g= @@ -55,6 +80,8 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.31.3 h1:VzudTFrDCIDakXtemR7l6Qzt2+JY github.com/aws/aws-sdk-go-v2/service/sts v1.31.3/go.mod h1:yMWe0F+XG0DkRZK5ODZhG7BEFYhLXi2dqGsv6tX0cgI= github.com/aws/smithy-go v1.21.0 h1:H7L8dtDRk0P1Qm6y0ji7MCYMQObJ5R9CRpyPhRUkLYA= github.com/aws/smithy-go v1.21.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/c2fo/vfs/v6 v6.19.0 h1:ckb71lLqiaDjzdd3uHwiHHlvOp/Y/X+Y9SSGMg4IavU= +github.com/c2fo/vfs/v6 v6.19.0/go.mod h1:0YP92JNOxVPBZfiqdePWQlcpAdWA2UQtAplZuKfLk8A= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -74,17 +101,29 @@ github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m3 github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= +github.com/dsoprea/go-logging v0.0.0-20200710184922-b02d349568dd h1:l+vLbuxptsC6VQyQsfD7NnEC8BZuFpz45PgY+pH8YTg= +github.com/dsoprea/go-logging 
v0.0.0-20200710184922-b02d349568dd/go.mod h1:7I+3Pe2o/YSU88W0hWlm9S22W7XI1JFNJ86U0zPKMf8= +github.com/dsoprea/go-utility/v2 v2.0.0-20221003172846-a3e1774ef349 h1:DilThiXje0z+3UQ5YjYiSRRzVdtamFpvBQXKwMglWqw= +github.com/dsoprea/go-utility/v2 v2.0.0-20221003172846-a3e1774ef349/go.mod h1:4GC5sXji84i/p+irqghpPFZBF8tRN/Q7+700G0/DLe8= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fsouza/fake-gcs-server v1.49.3 h1:RPt94uYjWb+t19dlZg4PVRJFCvqf7px0YZDvIiUfjcU= +github.com/fsouza/fake-gcs-server v1.49.3/go.mod h1:WsE7OZKNd5WXgiry01oJO6mDvljOr+YLPR3VQtM2sDY= +github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -113,19 +152,37 @@ github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8= github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= +github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy 
v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jlaffaye/ftp v0.2.1-0.20240214224549-4edb16bfcd0f h1:u9Rqt4DbfQ1xc7syxtnWFNU1OjcXJeVYGsiU1q3QAI4= +github.com/jlaffaye/ftp v0.2.1-0.20240214224549-4edb16bfcd0f/go.mod h1:4p8lUl4vQ80L598CygL+3IFtm+3nggvvW/palOlViwE= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU= github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5 h1:rhqTjzJlm7EbkELJDKMTU7udov+Se0xZkWmugr6zGok= github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= @@ -135,12 +192,20 @@ github.com/juju/testing v0.0.0-20200510222523-6c8c298c77a0 h1:+WWUkhnTjV6RNOxkcw github.com/juju/testing v0.0.0-20200510222523-6c8c298c77a0/go.mod h1:hpGvhGHPVbNBraRLZEhoQwFLMrjK8PSlO4D3nDjKYXo= github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lunixbochs/vtclean v0.0.0-20160125035106-4fbf7632a2c6/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/mattn/go-colorable v0.0.6/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= +github.com/mattn/go-ieproxy v0.0.12 h1:OZkUFJC3ESNZPQ+6LzC3VJIFSnreeFLQyqvBWtvfL2M= +github.com/mattn/go-ieproxy v0.0.12/go.mod h1:Vn+N61199DAnVeTgaF8eoB9PvLO8P3OBnG95ENh7B7c= github.com/mattn/go-isatty v0.0.0-20160806122752-66b8e73f3f5c/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -150,14 +215,17 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8= github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= +github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= +github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA= +github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.1 h1:Ou41VVR3nMWWmTiEUnj0OlsgOSCUFgsPAOl6jRIcVtQ= @@ -169,10 +237,13 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod 
h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= @@ -180,6 +251,7 @@ github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0o github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck= github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= @@ -196,12 +268,21 @@ go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt3 go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -209,7 +290,16 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -218,18 +308,45 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= @@ -239,8 +356,12 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs= google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28= @@ -279,6 +400,7 @@ gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw= gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/Azure/azure-pipeline-go/LICENSE b/vendor/github.com/Azure/azure-pipeline-go/LICENSE new file mode 100644 index 000000000..d1ca00f20 --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go new file mode 100644 index 000000000..d7b866cdf --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go @@ -0,0 +1,284 @@ +package pipeline + +import ( + "context" + "github.com/mattn/go-ieproxy" + "net" + "net/http" + "os" + "time" +) + +// The Factory interface represents an object that can create its Policy object. Each HTTP request sent +// requires that this Factory create a new instance of its Policy object. +type Factory interface { + New(next Policy, po *PolicyOptions) Policy +} + +// FactoryFunc is an adapter that allows the use of an ordinary function as a Factory interface. +type FactoryFunc func(next Policy, po *PolicyOptions) PolicyFunc + +// New calls f(next,po). +func (f FactoryFunc) New(next Policy, po *PolicyOptions) Policy { + return f(next, po) +} + +// The Policy interface represents a mutable Policy object created by a Factory. The object can mutate/process +// the HTTP request and then forward it on to the next Policy object in the linked-list. The returned +// Response goes backward through the linked-list for additional processing. +// NOTE: Request is passed by value so changes do not change the caller's version of +// the request. However, Request has some fields that reference mutable objects (not strings). 
+// These references are copied; a deep copy is not performed. Specifically, this means that +// you should avoid modifying the objects referred to by these fields: URL, Header, Body, +// GetBody, TransferEncoding, Form, MultipartForm, Trailer, TLS, Cancel, and Response. +type Policy interface { + Do(ctx context.Context, request Request) (Response, error) +} + +// PolicyFunc is an adapter that allows the use of an ordinary function as a Policy interface. +type PolicyFunc func(ctx context.Context, request Request) (Response, error) + +// Do calls f(ctx, request). +func (f PolicyFunc) Do(ctx context.Context, request Request) (Response, error) { + return f(ctx, request) +} + +// Options configures a Pipeline's behavior. +type Options struct { + HTTPSender Factory // If sender is nil, then the pipeline's default client is used to send the HTTP requests. + Log LogOptions +} + +// LogLevel tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. +type LogLevel uint32 + +const ( + // LogNone tells a logger not to log any entries passed to it. + LogNone LogLevel = iota + + // LogFatal tells a logger to log all LogFatal entries passed to it. + LogFatal + + // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. + LogPanic + + // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. + LogError + + // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogWarning + + // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogInfo + + // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogDebug +) + +// LogOptions configures the pipeline's logging mechanism & level filtering. +type LogOptions struct { + Log func(level LogLevel, message string) + + // ShouldLog is called periodically allowing you to return whether the specified LogLevel should be logged or not. + // An application can return different values over its lifetime; this allows the application to dynamically + // alter what is logged. NOTE: This method can be called by multiple goroutines simultaneously so make sure + // you implement it in a goroutine-safe way. If nil, nothing is logged (the equivalent of returning LogNone). + // Usually, the function will be implemented simply like this: return level <= LogWarning + ShouldLog func(level LogLevel) bool +}
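+ +// For illustration only (a sketch from a caller's perspective, not part of the upstream source): a minimal +// LogOptions might route entries through the standard library logger and record warnings and anything more +// severe. The use of the "log" package and the chosen gating level are assumptions of the sketch. +// +// opts := pipeline.LogOptions{ +// Log: func(level pipeline.LogLevel, message string) { +// log.Printf("[level %d] %s", level, message) +// }, +// ShouldLog: func(level pipeline.LogLevel) bool { +// return level <= pipeline.LogWarning // lower values are more severe +// }, +// }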
+ +type pipeline struct { + factories []Factory + options Options +} + +// The Pipeline interface represents an ordered list of Factory objects and an object implementing the HTTPSender interface. +// You construct a Pipeline by calling the pipeline.NewPipeline function. To send an HTTP request, call pipeline.NewRequest +// and then call Pipeline's Do method passing a context, the request, and a method-specific Factory (or nil). Passing a +// method-specific Factory allows this one call to Do to inject a Policy into the linked-list. The policy is injected where +// the MethodFactoryMarker (see the pipeline.MethodFactoryMarker function) is in the slice of Factory objects. +// +// When Do is called, the Pipeline object asks each Factory object to construct its Policy object and adds each Policy to a linked-list. +// Then, Do sends the Context and Request through all the Policy objects. The final Policy object sends the request over the network +// (via the HTTPSender object passed to NewPipeline) and the response is returned backwards through all the Policy objects. +// Since Pipeline and Factory objects are goroutine-safe, you typically create 1 Pipeline object and reuse it to make many HTTP requests. +type Pipeline interface { + Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) +} + +// NewPipeline creates a new goroutine-safe Pipeline object from the slice of Factory objects and the specified options. +func NewPipeline(factories []Factory, o Options) Pipeline { + if o.HTTPSender == nil { + o.HTTPSender = newDefaultHTTPClientFactory() + } + if o.Log.Log == nil { + o.Log.Log = func(LogLevel, string) {} // No-op logger + } + return &pipeline{factories: factories, options: o} +} + +// Do is called for each and every HTTP request. It tells each Factory to create its own (mutable) Policy object +// replacing a MethodFactoryMarker factory (if it exists) with the methodFactory passed in. Then, the Context and Request +// are sent through the pipeline of Policy objects (which can transform the Request's URL/query parameters/headers) and +// ultimately sends the transformed HTTP request over the network. +func (p *pipeline) Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) { + response, err := p.newPolicies(methodFactory).Do(ctx, request) + request.close() + return response, err +} + +func (p *pipeline) newPolicies(methodFactory Factory) Policy { + // The last Policy is the one that actually sends the request over the wire and gets the response. + // It is overridable via the Options' HTTPSender field. + po := &PolicyOptions{pipeline: p} // One object shared by all policy objects + next := p.options.HTTPSender.New(nil, po) + + // Walk over the slice of Factory objects in reverse (from wire to API) + markers := 0 + for i := len(p.factories) - 1; i >= 0; i-- { + factory := p.factories[i] + if _, ok := factory.(methodFactoryMarker); ok { + markers++ + if markers > 1 { + panic("MethodFactoryMarker can only appear once in the pipeline") + } + if methodFactory != nil { + // Replace MethodFactoryMarker with passed-in methodFactory + next = methodFactory.New(next, po) + } + } else { + // Use the slice's Factory to construct its Policy + next = factory.New(next, po) + } + } + + // Each Factory has created its Policy + if markers == 0 && methodFactory != nil { + panic("Non-nil methodFactory requires MethodFactoryMarker in the pipeline") + } + return next // Return head of the Policy object linked-list +} + +// A PolicyOptions represents optional information that can be used by a node in the +// linked-list of Policy objects. A PolicyOptions is passed to the Factory's New method +// which passes it (if desired) to the Policy object it creates. Today, the Policy object +// uses the options to perform logging. But, in the future, this could be used for more. +type PolicyOptions struct { + pipeline *pipeline +} + +// ShouldLog returns true if the specified log level should be logged. +func (po *PolicyOptions) ShouldLog(level LogLevel) bool { + if po.pipeline.options.Log.ShouldLog != nil { + return po.pipeline.options.Log.ShouldLog(level) + } + return false +}
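+ +// An end-to-end sketch (illustrative only; the factory list, URL, and error handling are assumptions, not +// upstream guidance). A Pipeline is typically built once and reused for many requests: +// +// p := pipeline.NewPipeline([]pipeline.Factory{ /* e.g. telemetry, retry, logging factories */ }, +// pipeline.Options{Log: opts}) // opts as sketched above +// u, _ := url.Parse("https://example.invalid/resource") // hypothetical endpoint +// req, err := pipeline.NewRequest(http.MethodGet, *u, nil) // nil body for a GET +// if err == nil { +// resp, err := p.Do(context.Background(), nil, req) // nil: no method-specific factory +// if err == nil { +// _ = resp.Response() // the underlying *http.Response +// } +// }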
+func (po *PolicyOptions) Log(level LogLevel, msg string) {
+	if !po.ShouldLog(level) {
+		return // Short circuit message formatting if we're not logging it
+	}
+
+	// We are logging it, ensure trailing newline
+	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
+		msg += "\n" // Ensure trailing newline
+	}
+	po.pipeline.options.Log.Log(level, msg)
+
+	// If logger doesn't handle fatal/panic, we'll do it here.
+	if level == LogFatal {
+		os.Exit(1)
+	} else if level == LogPanic {
+		panic(msg)
+	}
+}
+
+var pipelineHTTPClient = newDefaultHTTPClient()
+
+func newDefaultHTTPClient() *http.Client {
+	// We want the Transport to have a large connection pool
+	return &http.Client{
+		Transport: &http.Transport{
+			Proxy: ieproxy.GetProxyFunc(),
+			// We use Dial instead of DialContext as DialContext has been reported to cause slower performance.
+			Dial /*Context*/ : (&net.Dialer{
+				Timeout:   30 * time.Second,
+				KeepAlive: 30 * time.Second,
+				DualStack: true,
+			}).Dial, /*Context*/
+			MaxIdleConns:           0, // No limit
+			MaxIdleConnsPerHost:    100,
+			IdleConnTimeout:        90 * time.Second,
+			TLSHandshakeTimeout:    10 * time.Second,
+			ExpectContinueTimeout:  1 * time.Second,
+			DisableKeepAlives:      false,
+			DisableCompression:     false,
+			MaxResponseHeaderBytes: 0,
+			//ResponseHeaderTimeout:  time.Duration{},
+			//ExpectContinueTimeout:  time.Duration{},
+		},
+	}
+}
+
+// newDefaultHTTPClientFactory creates a DefaultHTTPClientPolicyFactory object that sends HTTP requests using Go's default http.Client.
+func newDefaultHTTPClientFactory() Factory {
+	return FactoryFunc(func(next Policy, po *PolicyOptions) PolicyFunc {
+		return func(ctx context.Context, request Request) (Response, error) {
+			r, err := pipelineHTTPClient.Do(request.WithContext(ctx))
+			if err != nil {
+				err = NewError(err, "HTTP request failed")
+			}
+			return NewHTTPResponse(r), err
+		}
+	})
+}
+
+var mfm = methodFactoryMarker{} // Singleton
+
+// MethodFactoryMarker returns a special marker Factory object. When Pipeline's Do method is called, any
+// MethodFactoryMarker object is replaced with the specified methodFactory object. If nil is passed for Do's
+// methodFactory parameter, then the MethodFactoryMarker is ignored as the linked-list of Policy objects is created.
+func MethodFactoryMarker() Factory {
+	return mfm
+}
+
+type methodFactoryMarker struct {
+}
+
+func (methodFactoryMarker) New(next Policy, po *PolicyOptions) Policy {
+	panic("methodFactoryMarker policy should have been replaced with a method policy")
+}
+
+// LogSanitizer can be implemented to clean secrets from lines logged by ForceLog.
+// By default no implementation is provided here, because pipeline may be used in many different
+// contexts, so the correct implementation is context-dependent.
+type LogSanitizer interface {
+	SanitizeLogMessage(raw string) string
+}
+
+var sanitizer LogSanitizer
+var enableForceLog bool = true
+
+// SetLogSanitizer can be called to supply a custom LogSanitizer.
+// There is no thread safety or locking on the underlying variable,
+// so call this function just once at startup of your application
+// (don't later try to change the sanitizer on the fly).
+func SetLogSanitizer(s LogSanitizer) {
+	sanitizer = s
+}
+
+// SetForceLogEnabled can be used to disable ForceLog.
+// There is no thread safety or locking on the underlying variable,
+// so call this function just once at startup of your application
+// (don't later try to change the setting on the fly).
+func SetForceLogEnabled(enable bool) {
+	enableForceLog = enable
+}
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go
new file mode 100644
index 000000000..e7ce4970b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go
@@ -0,0 +1,14 @@
+package pipeline
+
+
+// ForceLog should rarely be used. It forcibly logs an entry to the
+// Windows Event Log (on Windows) or to the SysLog (on Linux).
+func ForceLog(level LogLevel, msg string) {
+	if !enableForceLog {
+		return
+	}
+	if sanitizer != nil {
+		msg = sanitizer.SanitizeLogMessage(msg)
+	}
+	forceLog(level, msg)
+}
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go
new file mode 100644
index 000000000..819509a1e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go
@@ -0,0 +1,33 @@
+// +build !windows,!nacl,!plan9
+
+package pipeline
+
+import (
+	"log"
+	"log/syslog"
+)
+
+// forceLog should rarely be used. It forcibly logs an entry to the
+// Windows Event Log (on Windows) or to the SysLog (on Linux).
+func forceLog(level LogLevel, msg string) {
+	if defaultLogger == nil {
+		return // Return fast if we failed to create the logger.
+	}
+	// We are logging it, ensure trailing newline
+	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
+		msg += "\n" // Ensure trailing newline
+	}
+	switch level {
+	case LogFatal:
+		defaultLogger.Fatal(msg)
+	case LogPanic:
+		defaultLogger.Panic(msg)
+	case LogError, LogWarning, LogInfo:
+		defaultLogger.Print(msg)
+	}
+}
+
+var defaultLogger = func() *log.Logger {
+	l, _ := syslog.NewLogger(syslog.LOG_USER|syslog.LOG_WARNING, log.LstdFlags)
+	return l
+}()
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go
new file mode 100644
index 000000000..5fcf40014
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go
@@ -0,0 +1,61 @@
+package pipeline
+
+import (
+	"os"
+	"syscall"
+	"unsafe"
+)
+
+// forceLog should rarely be used. It forcibly logs an entry to the
+// Windows Event Log (on Windows) or to the SysLog (on Linux).
+func forceLog(level LogLevel, msg string) {
+	var el eventType
+	switch level {
+	case LogError, LogFatal, LogPanic:
+		el = elError
+	case LogWarning:
+		el = elWarning
+	case LogInfo:
+		el = elInfo
+	}
+	// We are logging it, ensure trailing newline
+	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
+		msg += "\n" // Ensure trailing newline
+	}
+	reportEvent(el, 0, msg)
+}
+
+type eventType int16
+
+const (
+	elSuccess eventType = 0
+	elError   eventType = 1
+	elWarning eventType = 2
+	elInfo    eventType = 4
+)
+
+var reportEvent = func() func(eventType eventType, eventID int32, msg string) {
+	advAPI32 := syscall.MustLoadDLL("advapi32.dll") // lower case to tie in with Go's sysdll registration
+	registerEventSource := advAPI32.MustFindProc("RegisterEventSourceW")
+
+	sourceName, _ := os.Executable()
+	sourceNameUTF16, _ := syscall.UTF16PtrFromString(sourceName)
+	handle, _, lastErr := registerEventSource.Call(uintptr(0), uintptr(unsafe.Pointer(sourceNameUTF16)))
+	if lastErr == nil { // On error, logging is a no-op
+		return func(eventType eventType, eventID int32, msg string) {}
+	}
+	reportEvent := advAPI32.MustFindProc("ReportEventW")
+	return func(eventType eventType, eventID int32, msg string) {
+		s, _ := syscall.UTF16PtrFromString(msg)
+		_, _, _ = reportEvent.Call(
+			uintptr(handle),             // HANDLE  hEventLog
+			uintptr(eventType),          // WORD    wType
+			uintptr(0),                  // WORD    wCategory
+			uintptr(eventID),            // DWORD   dwEventID
+			uintptr(0),                  // PSID    lpUserSid
+			uintptr(1),                  // WORD    wNumStrings
+			uintptr(0),                  // DWORD   dwDataSize
+			uintptr(unsafe.Pointer(&s)), // LPCTSTR *lpStrings
+			uintptr(0))                  // LPVOID  lpRawData
+	}
+}()
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go
new file mode 100644
index 000000000..b5ab05f4d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go
@@ -0,0 +1,161 @@
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+Package pipeline implements an HTTP request/response middleware pipeline whose
+policy objects mutate an HTTP request's URL, query parameters, and/or headers before
+the request is sent over the wire.
+
+Not all policy objects mutate an HTTP request; some policy objects simply impact the
+flow of requests/responses by performing operations such as logging, retry policies,
+timeouts, failure injection, and deserialization of response payloads.
+
+Implementing the Policy Interface
+
+To implement a policy, define a struct that implements the pipeline.Policy interface's Do method. Your Do
+method is called when an HTTP request wants to be sent over the network. Your Do method can perform any
+operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, and/or query
+parameters, inject a failure, etc. Your Do method must then forward the HTTP request to the next Policy object
+in a linked-list, ensuring that the remaining Policy objects perform their work. Ultimately, the last Policy
+object sends the HTTP request over the network (by calling the HTTPSender's Do method).
+
+When an HTTP response comes back, each Policy object in the linked-list gets a chance to process the response
+(in reverse order).
+The Policy object can log the response, retry the operation if it failed due to a transient failure
+or timeout, deserialize the response body, etc. Ultimately, the last Policy object returns the HTTP response
+to the code that initiated the original HTTP request.
+
+Here is a template for how to define a pipeline.Policy object:
+
+	type myPolicy struct {
+		node PolicyNode
+		// TODO: Add configuration/setting fields here (if desired)...
+	}
+
+	func (p *myPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+		// TODO: Mutate/process the HTTP request here...
+		response, err := p.node.Do(ctx, request) // Forward HTTP request to next Policy & get HTTP response
+		// TODO: Mutate/process the HTTP response here...
+		return response, err // Return response/error to previous Policy
+	}
+
+Implementing the Factory Interface
+
+Each Policy struct definition requires a factory struct definition that implements the pipeline.Factory interface's New
+method. The New method is called when application code wants to initiate a new HTTP request. Factory's New method is
+passed a pipeline.PolicyNode object which contains a reference to the owning pipeline.Pipeline object (discussed later) and
+a reference to the next Policy object in the linked list. The New method should create its corresponding Policy object
+passing it the PolicyNode and any other configuration/settings fields appropriate for the specific Policy object.
+
+Here is a template for how to define a pipeline.Factory object:
+
+	// NOTE: Once created & initialized, Factory objects should be goroutine-safe (ex: immutable);
+	// this allows reuse (efficient use of memory) and makes these objects usable by multiple goroutines concurrently.
+	type myPolicyFactory struct {
+		// TODO: Add any configuration/setting fields if desired...
+	}
+
+	func (f *myPolicyFactory) New(node pipeline.PolicyNode) Policy {
+		return &myPolicy{node: node} // TODO: Also initialize any configuration/setting fields here (if desired)...
+	}
+
+Using your Factory and Policy objects via a Pipeline
+
+To use the Factory and Policy objects, an application constructs a slice of Factory objects and passes
+this slice to the pipeline.NewPipeline function.
+
+	func NewPipeline(factories []pipeline.Factory, sender pipeline.HTTPSender) Pipeline
+
+This function also requires an object implementing the HTTPSender interface. For simple scenarios,
+passing nil for HTTPSender causes a standard Go http.Client object to be created and used to actually
+send the HTTP request over the network. For more advanced scenarios, you can pass your own HTTPSender
+object in. This allows sharing of http.Client objects or the use of custom-configured http.Client objects
+or other objects that can simulate the network requests for testing purposes.
+
+Now that you have a pipeline.Pipeline object, you can create a pipeline.Request object (which is a simple
+wrapper around Go's standard http.Request object) and pass it to Pipeline's Do method along with passing a
+context.Context for cancelling the HTTP request (if desired).
+
+	type Pipeline interface {
+		Do(ctx context.Context, methodFactory pipeline.Factory, request pipeline.Request) (pipeline.Response, error)
+	}
+
+Do iterates over the slice of Factory objects and tells each one to create its corresponding
+Policy object. After the linked-list of Policy objects has been created, Do calls the first
+Policy object, passing it the Context & HTTP request parameters.
+These parameters now flow through all the Policy objects, giving each object a chance to look at and/or
+mutate the HTTP request. The last Policy object sends the message over the network.
+
+When the network operation completes, the HTTP response and error return values pass
+back through the same Policy objects in reverse order. Most Policy objects ignore the
+response/error but some log the result, retry the operation (depending on the exact
+reason the operation failed), or deserialize the response's body. Your own Policy
+objects can do whatever they like when processing outgoing requests or incoming responses.
+
+Note that after an I/O request runs to completion, the Policy objects for that request
+are garbage collected. However, Pipeline objects (like Factory objects) are goroutine-safe, allowing
+them to be created once and reused over many I/O operations. This allows for efficient use of
+memory and also makes them safely usable by multiple goroutines concurrently.
+
+Inserting a Method-Specific Factory into the Linked-List of Policy Objects
+
+While Pipeline and Factory objects can be reused over many different operations, it is
+common to have special behavior for a specific operation/method. For example, a method
+may need to deserialize the response's body to an instance of a specific data type.
+To accommodate this, the Pipeline's Do method takes an additional method-specific
+Factory object. The Do method tells this Factory to create a Policy object and
+injects this method-specific Policy object into the linked-list of Policy objects.
+
+When creating a Pipeline object, the slice of Factory objects passed must have 1
+(and only 1) entry marking where the method-specific Factory should be injected.
+The Factory marker is obtained by calling the pipeline.MethodFactoryMarker() function:
+
+	func MethodFactoryMarker() pipeline.Factory
+
+Creating an HTTP Request Object
+
+The HTTP request object passed to Pipeline's Do method is not Go's http.Request struct.
+Instead, it is a pipeline.Request struct which is a simple wrapper around Go's standard
+http.Request. You create a pipeline.Request object by calling the pipeline.NewRequest function:
+
+	func NewRequest(method string, url url.URL, options pipeline.RequestOptions) (request pipeline.Request, err error)
+
+To this function, you must pass a pipeline.RequestOptions that looks like this:
+
+	type RequestOptions struct {
+		// The readable and seekable stream to be sent to the server as the request's body.
+		Body io.ReadSeeker
+
+		// The callback method (if not nil) to be invoked to report progress as the stream is uploaded in the HTTP request.
+		Progress ProgressReceiver
+	}
+
+The method and struct ensure that the request's body stream is a read/seekable stream.
+A seekable stream is required so that upon retry, the final Policy object can seek
+the stream back to the beginning before retrying the network request and re-uploading the
+body. In addition, you can associate a ProgressReceiver callback function which will be
+invoked periodically to report progress while bytes are being read from the body stream
+and sent over the network.
+
+Processing the HTTP Response
+
+When an HTTP response comes in from the network, a reference to Go's http.Response struct is
+embedded in a struct that implements the pipeline.Response interface:
+
+	type Response interface {
+		Response() *http.Response
+	}
+
+This interface is returned through all the Policy objects.
+Each Policy object can call the Response interface's Response method to examine (or mutate) the embedded
+http.Response object.
+
+A Policy object can internally define another struct (implementing the pipeline.Response interface)
+that embeds an http.Response, adds additional fields, and returns this structure to other Policy
+objects. This allows a Policy object to deserialize the body to some other struct and return the
+original http.Response and the additional struct back through the Policy chain. Other Policy objects
+can see the Response but cannot see the additional struct with the deserialized body. After all the
+Policy objects have returned, the pipeline.Response interface is returned by Pipeline's Do method.
+The caller of this method can perform a type assertion attempting to get back to the struct type
+really returned by the Policy object. If the type assertion is successful, the caller now has
+access to both the http.Response and the deserialized struct object.*/
+package pipeline
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go
new file mode 100644
index 000000000..5d3d4339e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go
@@ -0,0 +1,184 @@
+package pipeline
+
+import (
+	"fmt"
+	"runtime"
+)
+
+type causer interface {
+	Cause() error
+}
+
+func errorWithPC(msg string, pc uintptr) string {
+	s := ""
+	if fn := runtime.FuncForPC(pc); fn != nil {
+		file, line := fn.FileLine(pc)
+		s = fmt.Sprintf("-> %v, %v:%v\n", fn.Name(), file, line)
+	}
+	s += msg + "\n\n"
+	return s
+}
+
+func getPC(callersToSkip int) uintptr {
+	// Get the PC of Initialize method's caller.
+	pc := [1]uintptr{}
+	_ = runtime.Callers(callersToSkip, pc[:])
+	return pc[0]
+}
+
+// ErrorNode can be an embedded field in a private error object. This field
+// adds Program Counter support and a 'cause' (reference to a preceding error).
+// When initializing an error type with this embedded field, initialize the
+// ErrorNode field by calling ErrorNode{}.Initialize(cause).
+type ErrorNode struct {
+	pc    uintptr // Represents a Program Counter that you can get symbols for.
+	cause error   // Refers to the preceding error (or nil)
+}
+
+// Error returns a string with the PC's symbols or "" if the PC is invalid.
+// When defining a new error type, have its Error method call this one passing
+// it the string representation of the error.
+func (e *ErrorNode) Error(msg string) string {
+	s := errorWithPC(msg, e.pc)
+	if e.cause != nil {
+		s += e.cause.Error() + "\n"
+	}
+	return s
+}
+
+// Cause returns the error that preceded this error.
+func (e *ErrorNode) Cause() error { return e.cause }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (e *ErrorNode) Unwrap() error { return e.cause }
+
+// Temporary returns true if the error occurred due to a temporary condition.
+func (e ErrorNode) Temporary() bool {
+	type temporary interface {
+		Temporary() bool
+	}
+
+	for err := e.cause; err != nil; {
+		if t, ok := err.(temporary); ok {
+			return t.Temporary()
+		}
+
+		if cause, ok := err.(causer); ok {
+			err = cause.Cause()
+		} else {
+			err = nil
+		}
+	}
+	return false
+}
+
+// Timeout returns true if the error occurred due to time expiring.
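+// It walks the chain of preceding errors, looking for an implementation of Timeout() bool.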
+func (e ErrorNode) Timeout() bool {
+	type timeout interface {
+		Timeout() bool
+	}
+
+	for err := e.cause; err != nil; {
+		if t, ok := err.(timeout); ok {
+			return t.Timeout()
+		}
+
+		if cause, ok := err.(causer); ok {
+			err = cause.Cause()
+		} else {
+			err = nil
+		}
+	}
+	return false
+}
+
+// Initialize is used to initialize an embedded ErrorNode field.
+// It captures the caller's program counter and saves the cause (preceding error).
+// To initialize the field, use "ErrorNode{}.Initialize(cause, 3)". A callersToSkip
+// value of 3 is very common; but, depending on your code nesting, you may need
+// a different value.
+func (ErrorNode) Initialize(cause error, callersToSkip int) ErrorNode {
+	pc := getPC(callersToSkip)
+	return ErrorNode{pc: pc, cause: cause}
+}
+
+// Cause walks all the preceding errors and returns the originating error.
+func Cause(err error) error {
+	for err != nil {
+		cause, ok := err.(causer)
+		if !ok {
+			break
+		}
+		err = cause.Cause()
+	}
+	return err
+}
+
+// ErrorNodeNoCause can be an embedded field in a private error object. This field
+// adds Program Counter support.
+// When initializing an error type with this embedded field, initialize the
+// ErrorNodeNoCause field by calling ErrorNodeNoCause{}.Initialize().
+type ErrorNodeNoCause struct {
+	pc uintptr // Represents a Program Counter that you can get symbols for.
+}
+
+// Error returns a string with the PC's symbols or "" if the PC is invalid.
+// When defining a new error type, have its Error method call this one passing
+// it the string representation of the error.
+func (e *ErrorNodeNoCause) Error(msg string) string {
+	return errorWithPC(msg, e.pc)
+}
+
+// Temporary returns true if the error occurred due to a temporary condition.
+func (e ErrorNodeNoCause) Temporary() bool {
+	return false
+}
+
+// Timeout returns true if the error occurred due to time expiring.
+func (e ErrorNodeNoCause) Timeout() bool {
+	return false
+}
+
+// Initialize is used to initialize an embedded ErrorNodeNoCause field.
+// It captures the caller's program counter.
+// To initialize the field, use "ErrorNodeNoCause{}.Initialize(3)". A callersToSkip
+// value of 3 is very common; but, depending on your code nesting, you may need
+// a different value.
+func (ErrorNodeNoCause) Initialize(callersToSkip int) ErrorNodeNoCause {
+	pc := getPC(callersToSkip)
+	return ErrorNodeNoCause{pc: pc}
+}
+
+// NewError creates a simple string error (like errors.New). But, this
+// error also captures the caller's Program Counter and the preceding error (if provided).
+func NewError(cause error, msg string) error {
+	if cause != nil {
+		return &pcError{
+			ErrorNode: ErrorNode{}.Initialize(cause, 3),
+			msg:       msg,
+		}
+	}
+	return &pcErrorNoCause{
+		ErrorNodeNoCause: ErrorNodeNoCause{}.Initialize(3),
+		msg:              msg,
+	}
+}
+
+// pcError is a simple string error (like errors.New) with an ErrorNode (PC & cause).
+type pcError struct {
+	ErrorNode
+	msg string
+}
+
+// Error satisfies the error interface. It shows the error with Program Counter
+// symbols and calls Error on the preceding error so you can see the full error chain.
+func (e *pcError) Error() string { return e.ErrorNode.Error(e.msg) }
+
+// pcErrorNoCause is a simple string error (like errors.New) with an ErrorNodeNoCause (PC).
+type pcErrorNoCause struct {
+	ErrorNodeNoCause
+	msg string
+}
+
+// Error satisfies the error interface. It shows the error with Program Counter symbols.
+func (e *pcErrorNoCause) Error() string { return e.ErrorNodeNoCause.Error(e.msg) }
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go
new file mode 100644
index 000000000..efa3c8ed0
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go
@@ -0,0 +1,82 @@
+package pipeline
+
+import "io"
+
+// ********** The following is common between the request body AND the response body.
+
+// ProgressReceiver defines the signature of a callback function invoked as progress is reported.
+type ProgressReceiver func(bytesTransferred int64)
+
+// ********** The following are specific to the request body (a ReadSeekCloser)
+
+// This struct is used when sending a body to the network
+type requestBodyProgress struct {
+	requestBody io.ReadSeeker // Seeking is required to support retries
+	pr          ProgressReceiver
+}
+
+// NewRequestBodyProgress adds progress reporting to an HTTP request's body stream.
+func NewRequestBodyProgress(requestBody io.ReadSeeker, pr ProgressReceiver) io.ReadSeeker {
+	if pr == nil {
+		panic("pr must not be nil")
+	}
+	return &requestBodyProgress{requestBody: requestBody, pr: pr}
+}
+
+// Read reads a block of data from an inner stream and reports progress
+func (rbp *requestBodyProgress) Read(p []byte) (n int, err error) {
+	n, err = rbp.requestBody.Read(p)
+	if err != nil {
+		return
+	}
+	// Invokes the user's callback method to report progress
+	position, err := rbp.requestBody.Seek(0, io.SeekCurrent)
+	if err != nil {
+		panic(err)
+	}
+	rbp.pr(position)
+	return
+}
+
+func (rbp *requestBodyProgress) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
+	return rbp.requestBody.Seek(offset, whence)
+}
+
+// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it.
+func (rbp *requestBodyProgress) Close() error {
+	if c, ok := rbp.requestBody.(io.Closer); ok {
+		return c.Close()
+	}
+	return nil
+}
+
+// ********** The following are specific to the response body (a ReadCloser)
+
+// This struct is used when receiving a body from the network
+type responseBodyProgress struct {
+	responseBody io.ReadCloser
+	pr           ProgressReceiver
+	offset       int64
+}
+
+// NewResponseBodyProgress adds progress reporting to an HTTP response's body stream.
+func NewResponseBodyProgress(responseBody io.ReadCloser, pr ProgressReceiver) io.ReadCloser {
+	if pr == nil {
+		panic("pr must not be nil")
+	}
+	return &responseBodyProgress{responseBody: responseBody, pr: pr, offset: 0}
+}
+
+// Read reads a block of data from an inner stream and reports progress
+func (rbp *responseBodyProgress) Read(p []byte) (n int, err error) {
+	n, err = rbp.responseBody.Read(p)
+	rbp.offset += int64(n)
+
+	// Invokes the user's callback method to report progress
+	rbp.pr(rbp.offset)
+	return
+}
+
+func (rbp *responseBodyProgress) Close() error {
+	return rbp.responseBody.Close()
+}
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go
new file mode 100644
index 000000000..1fbe72bd4
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go
@@ -0,0 +1,147 @@
+package pipeline
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+)
+
+// Request is a thin wrapper over an http.Request. The wrapper provides several helper methods.
+type Request struct {
+	*http.Request
+}
+
+// NewRequest initializes a new HTTP request object with any desired options.
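+// A minimal usage sketch, for illustration only (the URL and header shown are hypothetical):
+//
+//	u, _ := url.Parse("https://example.com/resource")
+//	req, err := NewRequest(http.MethodGet, *u, nil)
+//	if err == nil {
+//		req.Header.Set("x-custom-header", "value") // fields of the embedded *http.Request are directly accessible
+//	}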
+func NewRequest(method string, url url.URL, body io.ReadSeeker) (request Request, err error) {
+	// Note: the url is passed by value so that any pipeline operations that modify it do so on a copy.
+
+	// This code to construct an http.Request is copied from http.NewRequest(); we intentionally omitted removeEmptyPort for now.
+	request.Request = &http.Request{
+		Method:     method,
+		URL:        &url,
+		Proto:      "HTTP/1.1",
+		ProtoMajor: 1,
+		ProtoMinor: 1,
+		Header:     make(http.Header),
+		Host:       url.Host,
+	}
+
+	if body != nil {
+		err = request.SetBody(body)
+	}
+	return
+}
+
+// SetBody sets the body and content length; it assumes body is not nil.
+func (r Request) SetBody(body io.ReadSeeker) error {
+	size, err := body.Seek(0, io.SeekEnd)
+	if err != nil {
+		return err
+	}
+
+	body.Seek(0, io.SeekStart)
+	r.ContentLength = size
+	r.Header["Content-Length"] = []string{strconv.FormatInt(size, 10)}
+
+	if size != 0 {
+		r.Body = &retryableRequestBody{body: body}
+		r.GetBody = func() (io.ReadCloser, error) {
+			_, err := body.Seek(0, io.SeekStart)
+			if err != nil {
+				return nil, err
+			}
+			return r.Body, nil
+		}
+	} else {
+		// in case the body is an empty stream, we need to use http.NoBody to explicitly provide no content
+		r.Body = http.NoBody
+		r.GetBody = func() (io.ReadCloser, error) {
+			return http.NoBody, nil
+		}
+
+		// close the user-provided empty body
+		if c, ok := body.(io.Closer); ok {
+			c.Close()
+		}
+	}
+
+	return nil
+}
+
+// Copy makes a copy of an http.Request. Specifically, it makes a deep copy
+// of its Method, URL, Host, Proto(Major/Minor), Header, ContentLength, Close,
+// RemoteAddr, and RequestURI. Copy makes a shallow copy of the Body, GetBody, TLS,
+// Cancel, Response, and ctx fields. Copy panics if any of these fields are
+// not nil: TransferEncoding, Form, PostForm, MultipartForm, or Trailer.
+func (r Request) Copy() Request {
+	if r.TransferEncoding != nil || r.Form != nil || r.PostForm != nil || r.MultipartForm != nil || r.Trailer != nil {
+		panic("Can't make a deep copy of the http.Request because at least one of the following is not nil:" +
+			"TransferEncoding, Form, PostForm, MultipartForm, or Trailer.")
+	}
+	copy := *r.Request          // Copy the request
+	urlCopy := *(r.Request.URL) // Copy the URL
+	copy.URL = &urlCopy
+	copy.Header = http.Header{} // Copy the header
+	for k, vs := range r.Header {
+		for _, value := range vs {
+			copy.Header.Add(k, value)
+		}
+	}
+	return Request{Request: &copy} // Return the copy
+}
+
+func (r Request) close() error {
+	if r.Body != nil && r.Body != http.NoBody {
+		c, ok := r.Body.(*retryableRequestBody)
+		if !ok {
+			panic("unexpected request body type (should be *retryableRequestBody)")
+		}
+		return c.realClose()
+	}
+	return nil
+}
+
+// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
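+// It is a no-op when the request has no body (Body is nil or http.NoBody).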
+func (r Request) RewindBody() error {
+	if r.Body != nil && r.Body != http.NoBody {
+		s, ok := r.Body.(io.Seeker)
+		if !ok {
+			panic("unexpected request body type (should be io.Seeker)")
+		}
+
+		// Reset the stream back to the beginning
+		_, err := s.Seek(0, io.SeekStart)
+		return err
+	}
+	return nil
+}
+
+// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)
+
+// This struct is used when sending a body to the network
+type retryableRequestBody struct {
+	body io.ReadSeeker // Seeking is required to support retries
+}
+
+// Read reads a block of data from an inner stream.
+func (b *retryableRequestBody) Read(p []byte) (n int, err error) {
+	return b.body.Read(p)
+}
+
+func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
+	return b.body.Seek(offset, whence)
+}
+
+func (b *retryableRequestBody) Close() error {
+	// We don't want the underlying transport to close the request body on transient failures so this is a nop.
+	// The pipeline closes the request body upon success.
+	return nil
+}
+
+func (b *retryableRequestBody) realClose() error {
+	if c, ok := b.body.(io.Closer); ok {
+		return c.Close()
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go
new file mode 100644
index 000000000..f2dc16482
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go
@@ -0,0 +1,74 @@
+package pipeline
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"sort"
+	"strings"
+)
+
+// The Response interface exposes an http.Response object as it returns through the pipeline of Policy objects.
+// This ensures that Policy objects have access to the HTTP response. However, the object this interface encapsulates
+// might be a struct with additional fields that is created by a Policy object (typically a method-specific Factory).
+// The method that injected the method-specific Factory gets this returned Response and performs a type assertion
+// to the expected struct and returns the struct to its caller.
+type Response interface {
+	Response() *http.Response
+}
+
+// This is the default struct that has the http.Response.
+// A method can replace this struct with its own struct containing an http.Response
+// field and any other additional fields.
+type httpResponse struct {
+	response *http.Response
+}
+
+// NewHTTPResponse is typically called by a Policy object to return a Response object.
+func NewHTTPResponse(response *http.Response) Response {
+	return &httpResponse{response: response}
+}
+
+// This method satisfies the public Response interface's Response method
+func (r httpResponse) Response() *http.Response {
+	return r.response
+}
+
+// WriteRequestWithResponse appends a formatted HTTP request into a Buffer. If response and/or err are
+// not nil, then these are also written into the Buffer.
+func WriteRequestWithResponse(b *bytes.Buffer, request *http.Request, response *http.Response, err error) {
+	// Write the request into the buffer.
+	fmt.Fprint(b, "   "+request.Method+" "+request.URL.String()+"\n")
+	writeHeader(b, request.Header)
+	if response != nil {
+		fmt.Fprintln(b, "   --------------------------------------------------------------------------------")
+		fmt.Fprint(b, "   RESPONSE Status: "+response.Status+"\n")
+		writeHeader(b, response.Header)
+	}
+	if err != nil {
+		fmt.Fprintln(b, "   --------------------------------------------------------------------------------")
+		fmt.Fprint(b, "   ERROR:\n"+err.Error()+"\n")
+	}
+}
+
+// writeHeader appends an HTTP request's or response's header into a Buffer.
+func writeHeader(b *bytes.Buffer, header map[string][]string) {
+	if len(header) == 0 {
+		b.WriteString("   (no headers)\n")
+		return
+	}
+	keys := make([]string, 0, len(header))
+	// Alphabetize the headers
+	for k := range header {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		// Redact the value of any Authorization header to prevent security information from persisting in logs
+		value := interface{}("REDACTED")
+		if !strings.EqualFold(k, "Authorization") {
+			value = header[k]
+		}
+		fmt.Fprintf(b, "   %s: %+v\n", k, value)
+	}
+}
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go
new file mode 100644
index 000000000..899f996b5
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go
@@ -0,0 +1,9 @@
+package pipeline
+
+const (
+	// UserAgent is the string to be used in the user agent string when making requests.
+	UserAgent = "azure-pipeline-go/" + Version
+
+	// Version is the semantic version (see http://semver.org) of the pipeline package.
+	Version = "0.2.1"
+)
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/LICENSE b/vendor/github.com/Azure/azure-storage-blob-go/LICENSE
new file mode 100644
index 000000000..d1ca00f20
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/LICENSE
@@ -0,0 +1,21 @@
+    MIT License
+
+    Copyright (c) Microsoft Corporation. All rights reserved.
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to deal
+    in the Software without restriction, including without limitation the rights
+    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+    copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in all
+    copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+    SOFTWARE
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go
new file mode 100644
index 000000000..25fe68422
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go
@@ -0,0 +1,65 @@
+package azblob
+
+import (
+	"time"
+)
+
+// ModifiedAccessConditions identifies standard HTTP access conditions which you optionally set.
+type ModifiedAccessConditions struct {
+	IfModifiedSince   time.Time
+	IfUnmodifiedSince time.Time
+	IfMatch           ETag
+	IfNoneMatch       ETag
+}
+
+// pointers is for internal infrastructure. It returns the fields as pointers.
+func (ac ModifiedAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) {
+	if !ac.IfModifiedSince.IsZero() {
+		ims = &ac.IfModifiedSince
+	}
+	if !ac.IfUnmodifiedSince.IsZero() {
+		ius = &ac.IfUnmodifiedSince
+	}
+	if ac.IfMatch != ETagNone {
+		ime = &ac.IfMatch
+	}
+	if ac.IfNoneMatch != ETagNone {
+		inme = &ac.IfNoneMatch
+	}
+	return
+}
+
+// ContainerAccessConditions identifies container-specific access conditions which you optionally set.
+type ContainerAccessConditions struct {
+	ModifiedAccessConditions
+	LeaseAccessConditions
+}
+
+// BlobAccessConditions identifies blob-specific access conditions which you optionally set.
+type BlobAccessConditions struct {
+	ModifiedAccessConditions
+	LeaseAccessConditions
+}
+
+// LeaseAccessConditions identifies lease access conditions for a container or blob which you optionally set.
+type LeaseAccessConditions struct {
+	LeaseID string
+}
+
+// pointers is for internal infrastructure. It returns the fields as pointers.
+func (ac LeaseAccessConditions) pointers() (leaseID *string) {
+	if ac.LeaseID != "" {
+		leaseID = &ac.LeaseID
+	}
+	return
+}
+
+/*
+// getInt32 is for internal infrastructure. It is used with access condition values where
+// 0 (the default setting) is meaningful. The library interprets 0 as do not send the header
+// and the privately-stored field in the access condition object is stored as +1 higher than desired.
+// This method returns true if the value is > 0 (explicitly set), along with the stored value - 1 (the desired value).
+func getInt32(value int32) (bool, int32) { + return value > 0, value - 1 +} +*/ diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/bytes_writer.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/bytes_writer.go new file mode 100644 index 000000000..8d82ebe8c --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/bytes_writer.go @@ -0,0 +1,24 @@ +package azblob + +import ( + "errors" +) + +type bytesWriter []byte + +func newBytesWriter(b []byte) bytesWriter { + return b +} + +func (c bytesWriter) WriteAt(b []byte, off int64) (int, error) { + if off >= int64(len(c)) || off < 0 { + return 0, errors.New("Offset value is out of range") + } + + n := copy(c[int(off):], b) + if n < len(b) { + return n, errors.New("Not enough space for all bytes") + } + + return n, nil +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go new file mode 100644 index 000000000..6beb80f88 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go @@ -0,0 +1,215 @@ +package azblob + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "io" + "sync" + "sync/atomic" + + guuid "github.com/google/uuid" +) + +// blockWriter provides methods to upload blocks that represent a file to a server and commit them. +// This allows us to provide a local implementation that fakes the server for hermetic testing. +type blockWriter interface { + StageBlock(context.Context, string, io.ReadSeeker, LeaseAccessConditions, []byte, ClientProvidedKeyOptions) (*BlockBlobStageBlockResponse, error) + CommitBlockList(context.Context, []string, BlobHTTPHeaders, Metadata, BlobAccessConditions, AccessTierType, BlobTagsMap, ClientProvidedKeyOptions, ImmutabilityPolicyOptions) (*BlockBlobCommitBlockListResponse, error) +} + +// copyFromReader copies a source io.Reader to blob storage using concurrent uploads. +// TODO(someone): The existing model provides a buffer size and buffer limit as limiting factors. The buffer size is probably +// useless other than needing to be above some number, as the network stack is going to hack up the buffer over some size. The +// max buffers is providing a cap on how much memory we use (by multiplying it times the buffer size) and how many go routines can upload +// at a time. I think having a single max memory dial would be more efficient. We can choose an internal buffer size that works +// well, 4 MiB or 8 MiB, and autoscale to as many goroutines within the memory limit. This gives a single dial to tweak and we can +// choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model). +// We can even provide a utility to dial this number in for customer networks to optimize their copies. +func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamToBlockBlobOptions) (*BlockBlobCommitBlockListResponse, error) { + if err := o.defaults(); err != nil { + return nil, err + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + cp := &copier{ + ctx: ctx, + cancel: cancel, + reader: from, + to: to, + id: newID(), + o: o, + errCh: make(chan error, 1), + } + + // Send all our chunks until we get an error. + var err error + for { + if err = cp.sendChunk(); err != nil { + break + } + } + // If the error is not EOF, then we have a problem. 
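+	// Wait for any in-flight chunk uploads to drain before surfacing the failure.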
+	if err != nil && !errors.Is(err, io.EOF) {
+		cp.wg.Wait()
+		return nil, err
+	}
+
+	// Close out our upload.
+	if err := cp.close(); err != nil {
+		return nil, err
+	}
+
+	return cp.result, nil
+}
+
+// copier streams a file via chunks in parallel from a reader representing a file.
+// Do not use directly, instead use copyFromReader().
+type copier struct {
+	// ctx holds the context of a copier. Storing a Context in a struct is normally a faux pas. In this case,
+	// the copier has the lifetime of a function call, so it's fine.
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	// o contains our options for uploading.
+	o UploadStreamToBlockBlobOptions
+
+	// id provides the ids for each chunk.
+	id *id
+
+	// reader is the source to be written to storage.
+	reader io.Reader
+	// to is the location we are writing our chunks to.
+	to blockWriter
+
+	// errCh is used to hold the first error from our concurrent writers.
+	errCh chan error
+	// wg provides a count of how many writers we are waiting to finish.
+	wg sync.WaitGroup
+
+	// result holds the final result from blob storage after we have submitted all chunks.
+	result *BlockBlobCommitBlockListResponse
+}
+
+type copierChunk struct {
+	buffer []byte
+	id     string
+	length int
+}
+
+// getErr returns an error by priority. First, if a function set an error, it returns that error. Next, if the Context has an error
+// it returns that error. Otherwise it is nil. getErr supports only returning an error once per copier.
+func (c *copier) getErr() error {
+	select {
+	case err := <-c.errCh:
+		return err
+	default:
+	}
+	return c.ctx.Err()
+}
+
+// sendChunk reads data from our internal reader, creates a chunk, and sends it to be written via a channel.
+// sendChunk returns io.EOF when the reader returns an io.EOF or io.ErrUnexpectedEOF.
+func (c *copier) sendChunk() error {
+	if err := c.getErr(); err != nil {
+		return err
+	}
+
+	buffer := c.o.TransferManager.Get()
+	if len(buffer) == 0 {
+		return fmt.Errorf("TransferManager returned a 0 size buffer, this is a bug in the manager")
+	}
+
+	n, err := io.ReadFull(c.reader, buffer)
+	if n > 0 {
+		// Some data was read, schedule the write.
+		id := c.id.next()
+		c.wg.Add(1)
+		c.o.TransferManager.Run(
+			func() {
+				defer c.wg.Done()
+				c.write(copierChunk{buffer: buffer, id: id, length: n})
+			},
+		)
+	} else {
+		// Return the unused buffer to the manager.
+		c.o.TransferManager.Put(buffer)
+	}
+
+	if err == nil {
+		return nil
+	} else if err == io.EOF || err == io.ErrUnexpectedEOF {
+		return io.EOF
+	}
+
+	if cerr := c.getErr(); cerr != nil {
+		return cerr
+	}
+
+	return err
+}
+
+// write uploads a chunk to blob storage.
+func (c *copier) write(chunk copierChunk) {
+	defer c.o.TransferManager.Put(chunk.buffer)
+
+	if err := c.ctx.Err(); err != nil {
+		return
+	}
+
+	_, err := c.to.StageBlock(c.ctx, chunk.id, bytes.NewReader(chunk.buffer[:chunk.length]), c.o.AccessConditions.LeaseAccessConditions, nil, c.o.ClientProvidedKeyOptions)
+	if err != nil {
+		c.errCh <- fmt.Errorf("write error: %w", err)
+		return
+	}
+}
+
+// close commits our blocks to blob storage and closes our writer.
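+// It waits for all outstanding chunk writes, surfaces any recorded error, and then commits the issued block IDs in order.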
+func (c *copier) close() error { + c.wg.Wait() + + if err := c.getErr(); err != nil { + return err + } + + var err error + c.result, err = c.to.CommitBlockList(c.ctx, c.id.issued(), c.o.BlobHTTPHeaders, c.o.Metadata, c.o.AccessConditions, c.o.BlobAccessTier, c.o.BlobTagsMap, c.o.ClientProvidedKeyOptions, c.o.ImmutabilityPolicyOptions) + return err +} + +// id allows the creation of unique IDs based on UUID4 + an int32. This auto-increments. +type id struct { + u [64]byte + num uint32 + all []string +} + +// newID constructs a new id. +func newID() *id { + uu := guuid.New() + u := [64]byte{} + copy(u[:], uu[:]) + return &id{u: u} +} + +// next returns the next ID. +func (id *id) next() string { + defer atomic.AddUint32(&id.num, 1) + + binary.BigEndian.PutUint32(id.u[len(guuid.UUID{}):], atomic.LoadUint32(&id.num)) + str := base64.StdEncoding.EncodeToString(id.u[:]) + id.all = append(id.all, str) + + return str +} + +// issued returns all ids that have been issued. This returned value shares the internal slice so it is not safe to modify the return. +// The value is only valid until the next time next() is called. +func (id *id) issued() []string { + return id.all +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/common_utils.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/common_utils.go new file mode 100644 index 000000000..18c3c2655 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/common_utils.go @@ -0,0 +1 @@ +package azblob diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go new file mode 100644 index 000000000..1a6da586f --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go @@ -0,0 +1,569 @@ +package azblob + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "net/http" + "os" + "sync" + "time" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// CommonResponse returns the headers common to all blob REST API responses. +type CommonResponse interface { + // ETag returns the value for header ETag. + ETag() ETag + + // LastModified returns the value for header Last-Modified. + LastModified() time.Time + + // RequestID returns the value for header x-ms-request-id. + RequestID() string + + // Date returns the value for header Date. + Date() time.Time + + // Version returns the value for header x-ms-version. + Version() string + + // Response returns the raw HTTP response object. + Response() *http.Response +} + +// UploadToBlockBlobOptions identifies options used by the UploadBufferToBlockBlob and UploadFileToBlockBlob functions. +type UploadToBlockBlobOptions struct { + // BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are sent to the BlockBlobURL. + // Note that the progress reporting is not always increasing; it can go down when retrying a request. + Progress pipeline.ProgressReceiver + + // BlobHTTPHeaders indicates the HTTP headers to be associated with the blob. + BlobHTTPHeaders BlobHTTPHeaders + + // Metadata indicates the metadata to be associated with the blob when PutBlockList is called. + Metadata Metadata + + // AccessConditions indicates the access conditions for the block blob. 
+	AccessConditions BlobAccessConditions
+
+	// BlobAccessTier indicates the tier of the blob.
+	BlobAccessTier AccessTierType
+
+	// BlobTagsMap indicates the tags to be associated with the blob.
+	BlobTagsMap BlobTagsMap
+
+	// ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
+	ClientProvidedKeyOptions ClientProvidedKeyOptions
+
+	// ImmutabilityPolicyOptions indicates an immutability policy or legal hold to be placed upon finishing upload.
+	// A container with object-level immutability enabled is required.
+	ImmutabilityPolicyOptions ImmutabilityPolicyOptions
+
+	// Parallelism indicates the maximum number of blocks to upload in parallel (0=default)
+	Parallelism uint16
+}
+
+// uploadReaderAtToBlockBlob uploads a buffer in blocks to a block blob.
+func uploadReaderAtToBlockBlob(ctx context.Context, reader io.ReaderAt, readerSize int64,
+	blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
+	if o.BlockSize == 0 {
+		// If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error
+		if readerSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks {
+			return nil, errors.New("buffer is too large to upload to a block blob")
+		}
+		// If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request
+		if readerSize <= BlockBlobMaxUploadBlobBytes {
+			o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
+		} else {
+			o.BlockSize = readerSize / BlockBlobMaxBlocks   // buffer / max blocks = block size to use all 50,000 blocks
+			if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB
+				o.BlockSize = BlobDefaultDownloadBlockSize
+			}
+			// StageBlock will be called with blockSize blocks and a Parallelism of (BufferSize / BlockSize).
+		}
+	}
+
+	if readerSize <= BlockBlobMaxUploadBlobBytes {
+		// If the size can fit in 1 Upload call, do it this way
+		var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize)
+		if o.Progress != nil {
+			body = pipeline.NewRequestBodyProgress(body, o.Progress)
+		}
+		return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap, o.ClientProvidedKeyOptions, o.ImmutabilityPolicyOptions)
+	}
+
+	var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1)
+
+	blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
+	progress := int64(0)
+	progressLock := &sync.Mutex{}
+
+	err := DoBatchTransfer(ctx, BatchTransferOptions{
+		OperationName: "uploadReaderAtToBlockBlob",
+		TransferSize:  readerSize,
+		ChunkSize:     o.BlockSize,
+		Parallelism:   o.Parallelism,
+		Operation: func(offset int64, count int64, ctx context.Context) error {
+			// This function is called once per block.
+			// It is passed this block's offset within the buffer and its count of bytes.
+			// Prepare to read the proper block/section of the buffer.
+			var body io.ReadSeeker = io.NewSectionReader(reader, offset, count)
+			blockNum := offset / o.BlockSize
+			if o.Progress != nil {
+				blockProgress := int64(0)
+				body = pipeline.NewRequestBodyProgress(body,
+					func(bytesTransferred int64) {
+						diff := bytesTransferred - blockProgress
+						blockProgress = bytesTransferred
+						progressLock.Lock() // 1 goroutine at a time gets a progress report
+						progress += diff
+						o.Progress(progress)
+						progressLock.Unlock()
+					})
+			}
+
+			// Block IDs are unique values to avoid issues if 2+ clients are uploading blocks
+			// at the same time causing PutBlockList to get a mix of blocks from all the clients.
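+			// All block IDs within a blob must be base64-encoded strings of equal length; a base64-encoded UUID satisfies both requirements.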
+			blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes())
+			_, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil, o.ClientProvidedKeyOptions)
+			return err
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	// All put blocks were successful, call Put Block List to finalize the blob
+	return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap, o.ClientProvidedKeyOptions, o.ImmutabilityPolicyOptions)
+}
+
+// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob.
+func UploadBufferToBlockBlob(ctx context.Context, b []byte,
+	blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
+	return uploadReaderAtToBlockBlob(ctx, bytes.NewReader(b), int64(len(b)), blockBlobURL, o)
+}
+
+// UploadFileToBlockBlob uploads a file in blocks to a block blob.
+func UploadFileToBlockBlob(ctx context.Context, file *os.File,
+	blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
+
+	stat, err := file.Stat()
+	if err != nil {
+		return nil, err
+	}
+	return uploadReaderAtToBlockBlob(ctx, file, stat.Size(), blockBlobURL, o)
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
+
+// DownloadFromBlobOptions identifies options used by the DownloadBlobToBuffer and DownloadBlobToFile functions.
+type DownloadFromBlobOptions struct {
+	// BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize.
+	BlockSize int64
+
+	// Progress is a function that is invoked periodically as bytes are received.
+	Progress pipeline.ProgressReceiver
+
+	// AccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
+	AccessConditions BlobAccessConditions
+
+	// ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
+	ClientProvidedKeyOptions ClientProvidedKeyOptions
+
+	// Parallelism indicates the maximum number of blocks to download in parallel (0=default)
+	Parallelism uint16
+
+	// RetryReaderOptionsPerBlock is used when downloading each block.
+	RetryReaderOptionsPerBlock RetryReaderOptions
+}
+
+// downloadBlobToWriterAt downloads an Azure blob to a WriterAt in parallel.
+func downloadBlobToWriterAt(ctx context.Context, blobURL BlobURL, offset int64, count int64,
+	writer io.WriterAt, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error {
+	if o.BlockSize == 0 {
+		o.BlockSize = BlobDefaultDownloadBlockSize
+	}
+
+	if count == CountToEnd { // If size not specified, calculate it
+		if initialDownloadResponse != nil {
+			count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it
+		} else {
+			// If we don't have the length at all, get it
+			dr, err := blobURL.Download(ctx, 0, CountToEnd, o.AccessConditions, false, o.ClientProvidedKeyOptions)
+			if err != nil {
+				return err
+			}
+			count = dr.ContentLength() - offset
+		}
+	}
+
+	if count <= 0 {
+		// The file is empty, there is nothing to download.
+		return nil
+	}
+
+	// Prepare and do parallel download.
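+	// Each chunk reports progress independently; the deltas are folded into a single running total under progressLock.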
+	progress := int64(0)
+	progressLock := &sync.Mutex{}
+
+	err := DoBatchTransfer(ctx, BatchTransferOptions{
+		OperationName: "downloadBlobToWriterAt",
+		TransferSize:  count,
+		ChunkSize:     o.BlockSize,
+		Parallelism:   o.Parallelism,
+		Operation: func(chunkStart int64, count int64, ctx context.Context) error {
+			dr, err := blobURL.Download(ctx, chunkStart+offset, count, o.AccessConditions, false, o.ClientProvidedKeyOptions)
+			if err != nil {
+				return err
+			}
+			body := dr.Body(o.RetryReaderOptionsPerBlock)
+			if o.Progress != nil {
+				rangeProgress := int64(0)
+				body = pipeline.NewResponseBodyProgress(
+					body,
+					func(bytesTransferred int64) {
+						diff := bytesTransferred - rangeProgress
+						rangeProgress = bytesTransferred
+						progressLock.Lock()
+						progress += diff
+						o.Progress(progress)
+						progressLock.Unlock()
+					})
+			}
+			_, err = io.Copy(newSectionWriter(writer, chunkStart, count), body)
+			body.Close()
+			return err
+		},
+	})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// DownloadBlobToBuffer downloads an Azure blob to a buffer in parallel.
+// Offset and count are optional, pass 0 for both to download the entire blob.
+func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
+	b []byte, o DownloadFromBlobOptions) error {
+	return downloadBlobToWriterAt(ctx, blobURL, offset, count, newBytesWriter(b), o, nil)
+}
+
+// DownloadBlobToFile downloads an Azure blob to a local file.
+// The file is truncated if its size doesn't match the download size.
+// Offset and count are optional, pass 0 for both to download the entire blob.
+func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64,
+	file *os.File, o DownloadFromBlobOptions) error {
+	// 1. Calculate the size of the destination file
+	var size int64
+
+	if count == CountToEnd {
+		// Try to get Azure blob's size
+		props, err := blobURL.GetProperties(ctx, o.AccessConditions, o.ClientProvidedKeyOptions)
+		if err != nil {
+			return err
+		}
+		size = props.ContentLength() - offset
+	} else {
+		size = count
+	}
+
+	// 2. Compare and try to resize local file's size if it doesn't match Azure blob's size.
+	stat, err := file.Stat()
+	if err != nil {
+		return err
+	}
+	if stat.Size() != size {
+		if err = file.Truncate(size); err != nil {
+			return err
+		}
+	}
+
+	if size > 0 {
+		return downloadBlobToWriterAt(ctx, blobURL, offset, size, file, o, nil)
+	} else { // if the blob's size is 0, there is no need to download it
+		return nil
+	}
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// BatchTransferOptions identifies options used by DoBatchTransfer.
+type BatchTransferOptions struct {
+	TransferSize  int64
+	ChunkSize     int64
+	Parallelism   uint16
+	Operation     func(offset int64, chunkSize int64, ctx context.Context) error
+	OperationName string
+}
+
+// DoBatchTransfer helps to execute operations in a batch manner.
+// It can be used to customize batch operations (for scenarios that the SDK does not provide).
+func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error {
+	if o.ChunkSize == 0 {
+		return errors.New("ChunkSize cannot be 0")
+	}
+
+	if o.Parallelism == 0 {
+		o.Parallelism = 5 // default Parallelism
+	}
+
+	// Prepare and do parallel operations.
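+	// numChunks rounds up, so a final partial chunk still gets its own operation.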
+ numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1)
+ operationChannel := make(chan func() error, o.Parallelism) // Channel that feeds work to the 'Parallelism' goroutines
+ operationResponseChannel := make(chan error, numChunks) // Holds each response
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ // Create the goroutines that process each operation (in parallel).
+ for g := uint16(0); g < o.Parallelism; g++ {
+ go func() {
+ for f := range operationChannel {
+ err := f()
+ operationResponseChannel <- err
+ }
+ }()
+ }
+
+ // Add each chunk's operation to the channel.
+ for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
+ curChunkSize := o.ChunkSize
+
+ if chunkNum == numChunks-1 { // Last chunk
+ curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total
+ }
+ offset := int64(chunkNum) * o.ChunkSize
+
+ operationChannel <- func() error {
+ return o.Operation(offset, curChunkSize, ctx)
+ }
+ }
+ close(operationChannel)
+
+ // Wait for the operations to complete.
+ var firstErr error
+ for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
+ responseError := <-operationResponseChannel
+ // record the first error (the original error which should cause the other chunks to fail with canceled context)
+ if responseError != nil && firstErr == nil {
+ cancel() // As soon as any operation fails, cancel all remaining operation calls
+ firstErr = responseError
+ }
+ }
+ return firstErr
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+// TransferManager provides a buffer and thread pool manager for certain transfer options.
+// It is undefined behavior if code outside of this package calls any of these methods.
+type TransferManager interface {
+ // Get provides a buffer that will be used to read data into and write out to the stream.
+ // It is guaranteed by this package to not read or write beyond the size of the slice.
+ Get() []byte
+ // Put may or may not put the buffer into underlying storage, depending on settings.
+ // The buffer must not be touched after this has been called.
+ Put(b []byte)
+ // Run will use a goroutine pool entry to run a function. This blocks until a pool
+ // goroutine becomes available.
+ Run(func())
+ // Close shuts down all internal goroutines. This must be called when the TransferManager
+ // will no longer be used. Not closing it will cause a goroutine leak.
+ Close()
+}
+
+type staticBuffer struct {
+ buffers chan []byte
+ size int
+ threadpool chan func()
+}
+
+// NewStaticBuffer creates a TransferManager that will use a channel as a circular buffer
+// that can hold "max" buffers of "size". The goroutine pool is also sized at max. This
+// can be shared between calls if you wish to control maximum memory and concurrency with
+// multiple concurrent calls.
+func NewStaticBuffer(size, max int) (TransferManager, error) {
+ if size < 1 || max < 1 {
+ return nil, fmt.Errorf("cannot be called with size or max set to < 1")
+ }
+
+ if size < _1MiB {
+ return nil, fmt.Errorf("cannot have size < 1MiB")
+ }
+
+ threadpool := make(chan func(), max)
+ buffers := make(chan []byte, max)
+ for i := 0; i < max; i++ {
+ go func() {
+ for f := range threadpool {
+ f()
+ }
+ }()
+
+ buffers <- make([]byte, size)
+ }
+ return staticBuffer{
+ buffers: buffers,
+ size: size,
+ threadpool: threadpool,
+ }, nil
+}
+
+// Get implements TransferManager.Get().
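+// Get blocks until one of the recycled buffers becomes available on the channel,
+// which is what caps this TransferManager at "max" outstanding buffers.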
+func (s staticBuffer) Get() []byte {
+ return <-s.buffers
+}
+
+// Put implements TransferManager.Put().
+func (s staticBuffer) Put(b []byte) {
+ select {
+ case s.buffers <- b:
+ default: // This shouldn't happen, but just in case they call Put() with their own buffer.
+ }
+}
+
+// Run implements TransferManager.Run().
+func (s staticBuffer) Run(f func()) {
+ s.threadpool <- f
+}
+
+// Close implements TransferManager.Close().
+func (s staticBuffer) Close() {
+ close(s.threadpool)
+ close(s.buffers)
+}
+
+type syncPool struct {
+ threadpool chan func()
+ pool sync.Pool
+}
+
+// NewSyncPool creates a TransferManager that will use a sync.Pool
+// that can hold an uncapped number of buffers, constrained only by concurrency. This
+// can be shared between calls if you wish to share memory and concurrency.
+func NewSyncPool(size, concurrency int) (TransferManager, error) {
+ if size < 1 || concurrency < 1 {
+ return nil, fmt.Errorf("cannot be called with size or concurrency set to < 1")
+ }
+
+ if size < _1MiB {
+ return nil, fmt.Errorf("cannot have size < 1MiB")
+ }
+
+ threadpool := make(chan func(), concurrency)
+ for i := 0; i < concurrency; i++ {
+ go func() {
+ for f := range threadpool {
+ f()
+ }
+ }()
+ }
+
+ return &syncPool{
+ threadpool: threadpool,
+ pool: sync.Pool{
+ New: func() interface{} {
+ return make([]byte, size)
+ },
+ },
+ }, nil
+}
+
+// Get implements TransferManager.Get().
+func (s *syncPool) Get() []byte {
+ return s.pool.Get().([]byte)
+}
+
+// Put implements TransferManager.Put().
+func (s *syncPool) Put(b []byte) {
+ s.pool.Put(b)
+}
+
+// Run implements TransferManager.Run().
+func (s *syncPool) Run(f func()) {
+ s.threadpool <- f
+}
+
+// Close implements TransferManager.Close().
+func (s *syncPool) Close() {
+ close(s.threadpool)
+}
+
+const _1MiB = 1024 * 1024
+
+// UploadStreamToBlockBlobOptions provides options for UploadStreamToBlockBlob.
+type UploadStreamToBlockBlobOptions struct {
+ // TransferManager provides a TransferManager that controls buffer allocation/reuse and
+ // concurrency. This overrides BufferSize and MaxBuffers if set.
+ TransferManager TransferManager
+ transferManagerNotSet bool
+ // BufferSize sizes the buffer used to read data from source. If < 1 MiB, defaults to 1 MiB.
+ BufferSize int
+ // MaxBuffers defines the number of simultaneous uploads that will be performed to upload the file.
+ MaxBuffers int
+ BlobHTTPHeaders BlobHTTPHeaders
+ Metadata Metadata
+ AccessConditions BlobAccessConditions
+ BlobAccessTier AccessTierType
+ BlobTagsMap BlobTagsMap
+ ClientProvidedKeyOptions ClientProvidedKeyOptions
+ ImmutabilityPolicyOptions ImmutabilityPolicyOptions
+}
+
+func (u *UploadStreamToBlockBlobOptions) defaults() error {
+ if u.TransferManager != nil {
+ return nil
+ }
+
+ if u.MaxBuffers == 0 {
+ u.MaxBuffers = 1
+ }
+
+ if u.BufferSize < _1MiB {
+ u.BufferSize = _1MiB
+ }
+
+ var err error
+ u.TransferManager, err = NewStaticBuffer(u.BufferSize, u.MaxBuffers)
+ if err != nil {
+ return fmt.Errorf("bug: default transfer manager could not be created: %s", err)
+ }
+ u.transferManagerNotSet = true
+ return nil
+}
+
+// UploadStreamToBlockBlob copies the data held in the io.Reader to the Blob at blockBlobURL.
+// A Context deadline or cancellation will cause this to error.
+func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL, o UploadStreamToBlockBlobOptions) (CommonResponse, error) {
+ if err := o.defaults(); err != nil {
+ return nil, err
+ }
+
+ // If we used the default manager, we need to close it.
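+ // (A caller-supplied TransferManager is deliberately left open so it can be reused
+ // across calls; only the default one created in defaults() is owned here.)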
+ if o.transferManagerNotSet {
+ defer o.TransferManager.Close()
+ }
+
+ result, err := copyFromReader(ctx, reader, blockBlobURL, o)
+ if err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// UploadStreamOptions (defunct) was used internally. This will be removed or made private in a future version.
+// TODO: Remove on next minor release in v0 or before v1.
+type UploadStreamOptions struct {
+ BufferSize int
+ MaxBuffers int
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go
new file mode 100644
index 000000000..ff055865c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go
@@ -0,0 +1,173 @@
+package azblob
+
+import (
+ "net"
+ "net/url"
+ "strings"
+)
+
+const (
+ snapshot = "snapshot"
+ versionId = "versionid"
+ SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
+)
+
+// A BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an
+// existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL().
+// NOTE: Changing any SAS-related field requires computing a new SAS signature.
+type BlobURLParts struct {
+ Scheme string // Ex: "https://"
+ Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80"
+ IPEndpointStyleInfo IPEndpointStyleInfo
+ ContainerName string // "" if no container
+ BlobName string // "" if no blob
+ Snapshot string // "" if not a snapshot
+ SAS SASQueryParameters
+ UnparsedParams string
+ VersionID string // "" if not versioning enabled
+}
+
+// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator.
+// Ex: "https://10.132.141.33/accountname/containername"
+type IPEndpointStyleInfo struct {
+ AccountName string // "" if not using IP endpoint style
+}
+
+// isIPEndpointStyle checks whether the URL's host is an IP address; in that case the storage account endpoint is composed as:
+// http(s)://IP(:port)/storageaccount/container/...
+// As with URL's Host property, host may be either host or host:port
+func isIPEndpointStyle(host string) bool {
+ if host == "" {
+ return false
+ }
+ if h, _, err := net.SplitHostPort(host); err == nil {
+ host = h
+ }
+ // For IPv6, SplitHostPort can fail because it cannot find a port.
+ // In this case, eliminate the '[' and ']' in the URL.
+ // For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732
+ if host[0] == '[' && host[len(host)-1] == ']' {
+ host = host[1 : len(host)-1]
+ }
+ return net.ParseIP(host) != nil
+}
+
+// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other
+// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object.
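+// A sketch of the parse, with made-up account/container names:
+//
+//   u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/dir/a.txt?snapshot=2020-01-01T00:00:00.0000000Z&sv=2019-12-12")
+//   parts := NewBlobURLParts(*u)
+//   // parts.ContainerName == "mycontainer", parts.BlobName == "dir/a.txt",
+//   // parts.Snapshot == "2020-01-01T00:00:00.0000000Z"; "sv" is parsed into parts.SAS.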
+func NewBlobURLParts(u url.URL) BlobURLParts { + up := BlobURLParts{ + Scheme: u.Scheme, + Host: u.Host, + } + + // Find the container & blob names (if any) + if u.Path != "" { + path := u.Path + if path[0] == '/' { + path = path[1:] // If path starts with a slash, remove it + } + if isIPEndpointStyle(up.Host) { + if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob + up.IPEndpointStyleInfo.AccountName = path + path = "" // No ContainerName present in the URL so path should be empty + } else { + up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes + path = path[accountEndIndex+1:] // path refers to portion after the account name now (container & blob names) + } + } + + containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists) + if containerEndIndex == -1 { // Slash not found; path has container name & no blob name + up.ContainerName = path + } else { + up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes + up.BlobName = path[containerEndIndex+1:] // The blob name is after the container slash + } + } + + // Convert the query parameters to a case-sensitive map & trim whitespace + paramsMap := u.Query() + + up.Snapshot = "" // Assume no snapshot + up.VersionID = "" // Assume no versionID + if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok { + up.Snapshot = snapshotStr[0] + // If we recognized the query parameter, remove it from the map + delete(paramsMap, snapshot) + } + + if versionIDs, ok := caseInsensitiveValues(paramsMap).Get(versionId); ok { + up.VersionID = versionIDs[0] + // If we recognized the query parameter, remove it from the map + delete(paramsMap, versionId) // delete "versionid" from paramsMap + delete(paramsMap, "versionId") // delete "versionId" from paramsMap + } + up.SAS = newSASQueryParameters(paramsMap, true) + up.UnparsedParams = paramsMap.Encode() + return up +} + +type caseInsensitiveValues url.Values // map[string][]string +func (values caseInsensitiveValues) Get(key string) ([]string, bool) { + key = strings.ToLower(key) + for k, v := range values { + if strings.ToLower(k) == key { + return v, true + } + } + return []string{}, false +} + +// URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery +// field contains the SAS, snapshot, and unparsed query parameters. 
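+// A typical round trip (sketch): parse with NewBlobURLParts, set e.g. parts.Snapshot,
+// then call parts.URL() to rebuild the URL. Per the NOTE on BlobURLParts, changing
+// any SAS-related field requires computing a new SAS signature.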
+func (up BlobURLParts) URL() url.URL {
+ path := ""
+ if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" {
+ path += "/" + up.IPEndpointStyleInfo.AccountName
+ }
+ // Concatenate container & blob names (if they exist)
+ if up.ContainerName != "" {
+ path += "/" + up.ContainerName
+ if up.BlobName != "" {
+ path += "/" + up.BlobName
+ }
+ }
+
+ rawQuery := up.UnparsedParams
+
+ // If no snapshot is initially provided, fill it in from the SAS query properties to help the user
+ if up.Snapshot == "" && !up.SAS.snapshotTime.IsZero() {
+ up.Snapshot = up.SAS.snapshotTime.Format(SnapshotTimeFormat)
+ }
+
+ // Concatenate blob snapshot query parameter (if it exists)
+ if up.Snapshot != "" {
+ if len(rawQuery) > 0 {
+ rawQuery += "&"
+ }
+ rawQuery += snapshot + "=" + up.Snapshot
+ }
+
+ // Concatenate blob version id query parameter (if it exists)
+ if up.VersionID != "" {
+ if len(rawQuery) > 0 {
+ rawQuery += "&"
+ }
+ rawQuery += versionId + "=" + up.VersionID
+ }
+
+ sas := up.SAS.Encode()
+ if sas != "" {
+ if len(rawQuery) > 0 {
+ rawQuery += "&"
+ }
+ rawQuery += sas
+ }
+ u := url.URL{
+ Scheme: up.Scheme,
+ Host: up.Host,
+ Path: path,
+ RawQuery: rawQuery,
+ }
+ return u
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/request_common.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/request_common.go
new file mode 100644
index 000000000..1cb90dc95
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/request_common.go
@@ -0,0 +1,56 @@
+package azblob
+
+import (
+ "time"
+)
+
+// ClientProvidedKeyOptions contains headers which may be specified from service version 2019-02-02
+// or higher to encrypt the data on the service side with the given key. Use of customer-provided keys
+// must be done over HTTPS. As the encryption key itself is provided in the request, a secure connection
+// must be established to transfer the key.
+// Note: Azure Storage does not store or manage customer provided encryption keys. Keys are securely discarded
+// as soon as possible after they’ve been used to encrypt or decrypt the blob data.
+// https://docs.microsoft.com/en-us/azure/storage/common/storage-service-encryption
+// https://docs.microsoft.com/en-us/azure/storage/common/customer-managed-keys-overview
+type ClientProvidedKeyOptions struct {
+ // A Base64-encoded AES-256 encryption key value.
+ EncryptionKey *string
+
+ // The Base64-encoded SHA256 of the encryption key.
+ EncryptionKeySha256 *string
+
+ // Specifies the algorithm to use when encrypting data using the given key. Must be AES256.
+ EncryptionAlgorithm EncryptionAlgorithmType
+
+ // Specifies the name of the encryption scope to use to encrypt the data provided in the request
+ // https://docs.microsoft.com/en-us/azure/storage/blobs/encryption-scope-overview
+ // https://docs.microsoft.com/en-us/azure/key-vault/general/overview
+ EncryptionScope *string
+}
+
+// NewClientProvidedKeyOptions constructs a ClientProvidedKeyOptions from the given key, key hash, and scope.
+// By default the encryption algorithm is "AES256" for service version 2019-02-02 or higher.
+func NewClientProvidedKeyOptions(ek *string, eksha256 *string, es *string) (cpk ClientProvidedKeyOptions) {
+ cpk = ClientProvidedKeyOptions{}
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, cpk.EncryptionScope = ek, eksha256, EncryptionAlgorithmAES256, es
+ return cpk
+}
+
+type ImmutabilityPolicyOptions struct {
+ // A container with object-level immutability enabled is required for any options.
+ // Both ImmutabilityPolicy options must be filled to set an immutability policy.
+ ImmutabilityPolicyUntilDate *time.Time
+ ImmutabilityPolicyMode BlobImmutabilityPolicyModeType
+
+ LegalHold *bool
+}
+
+func NewImmutabilityPolicyOptions(untilDate *time.Time, policyMode BlobImmutabilityPolicyModeType, legalHold *bool) ImmutabilityPolicyOptions {
+ opt := ImmutabilityPolicyOptions{}
+ opt.ImmutabilityPolicyUntilDate, opt.ImmutabilityPolicyMode, opt.LegalHold = untilDate, policyMode, legalHold
+ return opt
+}
+
+func (pol *ImmutabilityPolicyOptions) pointers() (*time.Time, BlobImmutabilityPolicyModeType, *bool) {
+ return pol.ImmutabilityPolicyUntilDate, pol.ImmutabilityPolicyMode, pol.LegalHold
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
new file mode 100644
index 000000000..2719b7366
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
@@ -0,0 +1,371 @@
+package azblob
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
+type BlobSASSignatureValues struct {
+ Version string `param:"sv"` // If not specified, this defaults to SASVersion
+ Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants
+ StartTime time.Time `param:"st"` // Not specified if IsZero
+ ExpiryTime time.Time `param:"se"` // Not specified if IsZero
+ SnapshotTime time.Time
+ Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String()
+ IPRange IPRange `param:"sip"`
+ Identifier string `param:"si"`
+ ContainerName string
+ BlobName string // Use "" to create a Container SAS
+ Directory string // Not empty for a directory SAS (i.e. sr=d)
+ CacheControl string // rscc
+ ContentDisposition string // rscd
+ ContentEncoding string // rsce
+ ContentLanguage string // rscl
+ ContentType string // rsct
+ BlobVersion string // sr=bv
+ PreauthorizedAgentObjectId string
+ AgentObjectId string
+ CorrelationId string
+}
+
+func getDirectoryDepth(path string) string {
+ if path == "" {
+ return ""
+ }
+ return fmt.Sprint(strings.Count(path, "/") + 1)
+}
+
+// NewSASQueryParameters uses an account's StorageAccountCredential to sign these signature values to produce
+// the proper SAS query parameters.
+// See: StorageAccountCredential. Compatible with both UserDelegationCredential and SharedKeyCredential.
+func (v BlobSASSignatureValues) NewSASQueryParameters(credential StorageAccountCredential) (SASQueryParameters, error) {
+ resource := "c"
+ if credential == nil {
+ return SASQueryParameters{}, fmt.Errorf("cannot sign SAS query without StorageAccountCredential")
+ }
+
+ if !v.SnapshotTime.IsZero() {
+ resource = "bs"
+ // Make sure the permission characters are in the correct order
+ perms := &BlobSASPermissions{}
+ if err := perms.Parse(v.Permissions); err != nil {
+ return SASQueryParameters{}, err
+ }
+ v.Permissions = perms.String()
+ } else if v.BlobVersion != "" {
+ resource = "bv"
+ // Make sure the permission characters are in the correct order
+ perms := &BlobSASPermissions{}
+ if err := perms.Parse(v.Permissions); err != nil {
+ return SASQueryParameters{}, err
+ }
+ v.Permissions = perms.String()
+ } else if v.Directory != "" {
+ resource = "d"
+ v.BlobName = ""
+ // Make sure the permission characters are in the correct order
+ perms := &BlobSASPermissions{}
+ if err := perms.Parse(v.Permissions); err != nil {
+ return SASQueryParameters{}, err
+ }
+ v.Permissions = perms.String()
+ } else if v.BlobName == "" {
+ // Make sure the permission characters are in the correct order
+ perms := &ContainerSASPermissions{}
+ if err := perms.Parse(v.Permissions); err != nil {
+ return SASQueryParameters{}, err
+ }
+ v.Permissions = perms.String()
+ } else {
+ resource = "b"
+ // Make sure the permission characters are in the correct order
+ perms := &BlobSASPermissions{}
+ if err := perms.Parse(v.Permissions); err != nil {
+ return SASQueryParameters{}, err
+ }
+ v.Permissions = perms.String()
+ }
+ if v.Version == "" {
+ v.Version = SASVersion
+ }
+ startTime, expiryTime, snapshotTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime)
+
+ signedIdentifier := v.Identifier
+
+ udk := credential.getUDKParams()
+
+ if udk != nil {
+ udkStart, udkExpiry, _ := FormatTimesForSASSigning(udk.SignedStart, udk.SignedExpiry, time.Time{})
+ // I don't like this answer to combining the functions,
+ // but because signedIdentifier and the user delegation key strings share a place, this is an _OK_ way to do it.
+ signedIdentifier = strings.Join([]string{ + udk.SignedOid, + udk.SignedTid, + udkStart, + udkExpiry, + udk.SignedService, + udk.SignedVersion, + v.PreauthorizedAgentObjectId, + v.AgentObjectId, + v.CorrelationId, + }, "\n") + } + + // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + stringToSign := strings.Join([]string{ + v.Permissions, + startTime, + expiryTime, + getCanonicalName(credential.AccountName(), v.ContainerName, v.BlobName, v.Directory), + signedIdentifier, + v.IPRange.String(), + string(v.Protocol), + v.Version, + resource, + snapshotTime, // signed timestamp + v.CacheControl, // rscc + v.ContentDisposition, // rscd + v.ContentEncoding, // rsce + v.ContentLanguage, // rscl + v.ContentType}, // rsct + "\n") + + signature := "" + signature = credential.ComputeHMACSHA256(stringToSign) + + p := SASQueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + + // Container/Blob-specific SAS parameters + resource: resource, + identifier: v.Identifier, + cacheControl: v.CacheControl, + contentDisposition: v.ContentDisposition, + contentEncoding: v.ContentEncoding, + contentLanguage: v.ContentLanguage, + contentType: v.ContentType, + snapshotTime: v.SnapshotTime, + signedDirectoryDepth: getDirectoryDepth(v.Directory), + preauthorizedAgentObjectId: v.PreauthorizedAgentObjectId, + agentObjectId: v.AgentObjectId, + correlationId: v.CorrelationId, + // Calculated SAS signature + signature: signature, + } + + //User delegation SAS specific parameters + if udk != nil { + p.signedOid = udk.SignedOid + p.signedTid = udk.SignedTid + p.signedStart = udk.SignedStart + p.signedExpiry = udk.SignedExpiry + p.signedService = udk.SignedService + p.signedVersion = udk.SignedVersion + } + + return p, nil +} + +// getCanonicalName computes the canonical name for a container or blob resource for SAS signing. +func getCanonicalName(account string, containerName string, blobName string, directoryName string) string { + // Container: "/blob/account/containername" + // Blob: "/blob/account/containername/blobname" + elements := []string{"/blob/", account, "/", containerName} + if blobName != "" { + elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1)) + } else if directoryName != "" { + elements = append(elements, "/", directoryName) + } + return strings.Join(elements, "") +} + +// The ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS. +// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. +// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob +type ContainerSASPermissions struct { + Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag, Immutability bool + Execute, ModifyOwnership, ModifyPermissions bool // Hierarchical Namespace only +} + +// String produces the SAS permissions string for an Azure Storage container. +// Call this method to set BlobSASSignatureValues's Permissions field. 
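+// For example (a sketch using the flags above):
+//
+//   ContainerSASPermissions{Read: true, Write: true, List: true}.String() // "rwl"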
+func (p ContainerSASPermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.DeletePreviousVersion { + b.WriteRune('x') + } + if p.List { + b.WriteRune('l') + } + if p.Tag { + b.WriteRune('t') + } + if p.Execute { + b.WriteRune('e') + } + if p.ModifyOwnership { + b.WriteRune('o') + } + if p.ModifyPermissions { + b.WriteRune('p') + } + if p.Immutability { + b.WriteRune('i') + } + return b.String() +} + +// Parse initializes the ContainerSASPermissions's fields from a string. +func (p *ContainerSASPermissions) Parse(s string) error { + *p = ContainerSASPermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'x': + p.DeletePreviousVersion = true + case 'l': + p.List = true + case 't': + p.Tag = true + case 'e': + p.Execute = true + case 'o': + p.ModifyOwnership = true + case 'p': + p.ModifyPermissions = true + case 'i': + p.Immutability = true + default: + return fmt.Errorf("invalid permission: '%v'", r) + } + } + return nil +} + +// The BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS. +// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. +type BlobSASPermissions struct { + Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag, List, Move, Execute, Ownership, Permissions, PermanentDelete, Immutability bool +} + +// String produces the SAS permissions string for an Azure Storage blob. +// Call this method to set BlobSASSignatureValues's Permissions field. +func (p BlobSASPermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.DeletePreviousVersion { + b.WriteRune('x') + } + if p.Tag { + b.WriteRune('t') + } + if p.List { + b.WriteRune('l') + } + if p.Move { + b.WriteRune('m') + } + if p.Execute { + b.WriteRune('e') + } + if p.Ownership { + b.WriteRune('o') + } + if p.Permissions { + b.WriteRune('p') + } + if p.PermanentDelete { + b.WriteRune('y') + } + if p.Immutability { + b.WriteRune('i') + } + return b.String() +} + +// Parse initializes the BlobSASPermissions's fields from a string. 
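+// For example, Parse("racw") sets Read, Add, Create and Write; any character
+// outside the set handled below (say 'z') causes an error, so round-tripping
+// through String() is the safe way to build these strings.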
+func (p *BlobSASPermissions) Parse(s string) error { + *p = BlobSASPermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'x': + p.DeletePreviousVersion = true + case 't': + p.Tag = true + case 'l': + p.List = true + case 'm': + p.Move = true + case 'e': + p.Execute = true + case 'o': + p.Ownership = true + case 'p': + p.Permissions = true + case 'y': + p.PermanentDelete = true + case 'i': + p.Immutability = true + default: + return fmt.Errorf("invalid permission: '%v'", r) + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/section_writer.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/section_writer.go new file mode 100644 index 000000000..6d86f6eb9 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/section_writer.go @@ -0,0 +1,47 @@ +package azblob + +import ( + "errors" + "io" +) + +type sectionWriter struct { + count int64 + offset int64 + position int64 + writerAt io.WriterAt +} + +func newSectionWriter(c io.WriterAt, off int64, count int64) *sectionWriter { + return §ionWriter{ + count: count, + offset: off, + writerAt: c, + } +} + +func (c *sectionWriter) Write(p []byte) (int, error) { + remaining := c.count - c.position + + if remaining <= 0 { + return 0, errors.New("End of section reached") + } + + slice := p + + if int64(len(slice)) > remaining { + slice = slice[:remaining] + } + + n, err := c.writerAt.WriteAt(slice, c.offset+c.position) + c.position += int64(n) + if err != nil { + return n, err + } + + if len(p) > n { + return n, errors.New("Not enough space for all bytes") + } + + return n, nil +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go new file mode 100644 index 000000000..292710cc3 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go @@ -0,0 +1,198 @@ +package azblob + +// https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes + +// ServiceCode values indicate a service failure. +const ( + // ServiceCodeAppendPositionConditionNotMet means the append position condition specified was not met. + ServiceCodeAppendPositionConditionNotMet ServiceCodeType = "AppendPositionConditionNotMet" + + // ServiceCodeBlobAlreadyExists means the specified blob already exists. + ServiceCodeBlobAlreadyExists ServiceCodeType = "BlobAlreadyExists" + + // ServiceCodeBlobNotFound means the specified blob does not exist. + ServiceCodeBlobNotFound ServiceCodeType = "BlobNotFound" + + // ServiceCodeBlobOverwritten means the blob has been recreated since the previous snapshot was taken. + ServiceCodeBlobOverwritten ServiceCodeType = "BlobOverwritten" + + // ServiceCodeBlobTierInadequateForContentLength means the specified blob tier size limit cannot be less than content length. + ServiceCodeBlobTierInadequateForContentLength ServiceCodeType = "BlobTierInadequateForContentLength" + + // ServiceCodeBlockCountExceedsLimit means the committed block count cannot exceed the maximum limit of 50,000 blocks + // or that the uncommitted block count cannot exceed the maximum limit of 100,000 blocks. + ServiceCodeBlockCountExceedsLimit ServiceCodeType = "BlockCountExceedsLimit" + + // ServiceCodeBlockListTooLong means the block list may not contain more than 50,000 blocks. 
+ ServiceCodeBlockListTooLong ServiceCodeType = "BlockListTooLong" + + // ServiceCodeCannotChangeToLowerTier means that a higher blob tier has already been explicitly set. + ServiceCodeCannotChangeToLowerTier ServiceCodeType = "CannotChangeToLowerTier" + + // ServiceCodeCannotVerifyCopySource means that the service could not verify the copy source within the specified time. + // Examine the HTTP status code and message for more information about the failure. + ServiceCodeCannotVerifyCopySource ServiceCodeType = "CannotVerifyCopySource" + + // ServiceCodeContainerAlreadyExists means the specified container already exists. + ServiceCodeContainerAlreadyExists ServiceCodeType = "ContainerAlreadyExists" + + // ServiceCodeContainerBeingDeleted means the specified container is being deleted. + ServiceCodeContainerBeingDeleted ServiceCodeType = "ContainerBeingDeleted" + + // ServiceCodeContainerDisabled means the specified container has been disabled by the administrator. + ServiceCodeContainerDisabled ServiceCodeType = "ContainerDisabled" + + // ServiceCodeContainerNotFound means the specified container does not exist. + ServiceCodeContainerNotFound ServiceCodeType = "ContainerNotFound" + + // ServiceCodeContentLengthLargerThanTierLimit means the blob's content length cannot exceed its tier limit. + ServiceCodeContentLengthLargerThanTierLimit ServiceCodeType = "ContentLengthLargerThanTierLimit" + + // ServiceCodeCopyAcrossAccountsNotSupported means the copy source account and destination account must be the same. + ServiceCodeCopyAcrossAccountsNotSupported ServiceCodeType = "CopyAcrossAccountsNotSupported" + + // ServiceCodeCopyIDMismatch means the specified copy ID did not match the copy ID for the pending copy operation. + ServiceCodeCopyIDMismatch ServiceCodeType = "CopyIdMismatch" + + // ServiceCodeFeatureVersionMismatch means the type of blob in the container is unrecognized by this version or + // that the operation for AppendBlob requires at least version 2015-02-21. + ServiceCodeFeatureVersionMismatch ServiceCodeType = "FeatureVersionMismatch" + + // ServiceCodeIncrementalCopyBlobMismatch means the specified source blob is different than the copy source of the existing incremental copy blob. + ServiceCodeIncrementalCopyBlobMismatch ServiceCodeType = "IncrementalCopyBlobMismatch" + + // ServiceCodeFeatureEncryptionMismatch means the given customer specified encryption does not match the encryption used to encrypt the blob. + ServiceCodeFeatureEncryptionMismatch ServiceCodeType = "BlobCustomerSpecifiedEncryptionMismatch" + + // ServiceCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob. + ServiceCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed" + + // ServiceCodeIncrementalCopySourceMustBeSnapshot means the source for incremental copy request must be a snapshot. + ServiceCodeIncrementalCopySourceMustBeSnapshot ServiceCodeType = "IncrementalCopySourceMustBeSnapshot" + + // ServiceCodeInfiniteLeaseDurationRequired means the lease ID matched, but the specified lease must be an infinite-duration lease. + ServiceCodeInfiniteLeaseDurationRequired ServiceCodeType = "InfiniteLeaseDurationRequired" + + // ServiceCodeInvalidBlobOrBlock means the specified blob or block content is invalid. 
+ ServiceCodeInvalidBlobOrBlock ServiceCodeType = "InvalidBlobOrBlock" + + // ServiceCodeInvalidBlobType means the blob type is invalid for this operation. + ServiceCodeInvalidBlobType ServiceCodeType = "InvalidBlobType" + + // ServiceCodeInvalidBlockID means the specified block ID is invalid. The block ID must be Base64-encoded. + ServiceCodeInvalidBlockID ServiceCodeType = "InvalidBlockId" + + // ServiceCodeInvalidBlockList means the specified block list is invalid. + ServiceCodeInvalidBlockList ServiceCodeType = "InvalidBlockList" + + // ServiceCodeInvalidOperation means an invalid operation against a blob snapshot. + ServiceCodeInvalidOperation ServiceCodeType = "InvalidOperation" + + // ServiceCodeInvalidPageRange means the page range specified is invalid. + ServiceCodeInvalidPageRange ServiceCodeType = "InvalidPageRange" + + // ServiceCodeInvalidSourceBlobType means the copy source blob type is invalid for this operation. + ServiceCodeInvalidSourceBlobType ServiceCodeType = "InvalidSourceBlobType" + + // ServiceCodeInvalidSourceBlobURL means the source URL for incremental copy request must be valid Azure Storage blob URL. + ServiceCodeInvalidSourceBlobURL ServiceCodeType = "InvalidSourceBlobUrl" + + // ServiceCodeInvalidVersionForPageBlobOperation means that all operations on page blobs require at least version 2009-09-19. + ServiceCodeInvalidVersionForPageBlobOperation ServiceCodeType = "InvalidVersionForPageBlobOperation" + + // ServiceCodeLeaseAlreadyPresent means there is already a lease present. + ServiceCodeLeaseAlreadyPresent ServiceCodeType = "LeaseAlreadyPresent" + + // ServiceCodeLeaseAlreadyBroken means the lease has already been broken and cannot be broken again. + ServiceCodeLeaseAlreadyBroken ServiceCodeType = "LeaseAlreadyBroken" + + // ServiceCodeLeaseIDMismatchWithBlobOperation means the lease ID specified did not match the lease ID for the blob. + ServiceCodeLeaseIDMismatchWithBlobOperation ServiceCodeType = "LeaseIdMismatchWithBlobOperation" + + // ServiceCodeLeaseIDMismatchWithContainerOperation means the lease ID specified did not match the lease ID for the container. + ServiceCodeLeaseIDMismatchWithContainerOperation ServiceCodeType = "LeaseIdMismatchWithContainerOperation" + + // ServiceCodeLeaseIDMismatchWithLeaseOperation means the lease ID specified did not match the lease ID for the blob/container. + ServiceCodeLeaseIDMismatchWithLeaseOperation ServiceCodeType = "LeaseIdMismatchWithLeaseOperation" + + // ServiceCodeLeaseIDMissing means there is currently a lease on the blob/container and no lease ID was specified in the request. + ServiceCodeLeaseIDMissing ServiceCodeType = "LeaseIdMissing" + + // ServiceCodeLeaseIsBreakingAndCannotBeAcquired means the lease ID matched, but the lease is currently in breaking state and cannot be acquired until it is broken. + ServiceCodeLeaseIsBreakingAndCannotBeAcquired ServiceCodeType = "LeaseIsBreakingAndCannotBeAcquired" + + // ServiceCodeLeaseIsBreakingAndCannotBeChanged means the lease ID matched, but the lease is currently in breaking state and cannot be changed. + ServiceCodeLeaseIsBreakingAndCannotBeChanged ServiceCodeType = "LeaseIsBreakingAndCannotBeChanged" + + // ServiceCodeLeaseIsBrokenAndCannotBeRenewed means the lease ID matched, but the lease has been broken explicitly and cannot be renewed. + ServiceCodeLeaseIsBrokenAndCannotBeRenewed ServiceCodeType = "LeaseIsBrokenAndCannotBeRenewed" + + // ServiceCodeLeaseLost means a lease ID was specified, but the lease for the blob/container has expired. 
+ ServiceCodeLeaseLost ServiceCodeType = "LeaseLost" + + // ServiceCodeLeaseNotPresentWithBlobOperation means there is currently no lease on the blob. + ServiceCodeLeaseNotPresentWithBlobOperation ServiceCodeType = "LeaseNotPresentWithBlobOperation" + + // ServiceCodeLeaseNotPresentWithContainerOperation means there is currently no lease on the container. + ServiceCodeLeaseNotPresentWithContainerOperation ServiceCodeType = "LeaseNotPresentWithContainerOperation" + + // ServiceCodeLeaseNotPresentWithLeaseOperation means there is currently no lease on the blob/container. + ServiceCodeLeaseNotPresentWithLeaseOperation ServiceCodeType = "LeaseNotPresentWithLeaseOperation" + + // ServiceCodeMaxBlobSizeConditionNotMet means the max blob size condition specified was not met. + ServiceCodeMaxBlobSizeConditionNotMet ServiceCodeType = "MaxBlobSizeConditionNotMet" + + // ServiceCodeNoPendingCopyOperation means there is currently no pending copy operation. + ServiceCodeNoPendingCopyOperation ServiceCodeType = "NoPendingCopyOperation" + + // ServiceCodeOperationNotAllowedOnIncrementalCopyBlob means the specified operation is not allowed on an incremental copy blob. + ServiceCodeOperationNotAllowedOnIncrementalCopyBlob ServiceCodeType = "OperationNotAllowedOnIncrementalCopyBlob" + + // ServiceCodePendingCopyOperation means there is currently a pending copy operation. + ServiceCodePendingCopyOperation ServiceCodeType = "PendingCopyOperation" + + // ServiceCodePreviousSnapshotCannotBeNewer means the prevsnapshot query parameter value cannot be newer than snapshot query parameter value. + ServiceCodePreviousSnapshotCannotBeNewer ServiceCodeType = "PreviousSnapshotCannotBeNewer" + + // ServiceCodePreviousSnapshotNotFound means the previous snapshot is not found. + ServiceCodePreviousSnapshotNotFound ServiceCodeType = "PreviousSnapshotNotFound" + + // ServiceCodePreviousSnapshotOperationNotSupported means that differential Get Page Ranges is not supported on the previous snapshot. + ServiceCodePreviousSnapshotOperationNotSupported ServiceCodeType = "PreviousSnapshotOperationNotSupported" + + // ServiceCodeSequenceNumberConditionNotMet means the sequence number condition specified was not met. + ServiceCodeSequenceNumberConditionNotMet ServiceCodeType = "SequenceNumberConditionNotMet" + + // ServiceCodeSequenceNumberIncrementTooLarge means the sequence number increment cannot be performed because it would result in overflow of the sequence number. + ServiceCodeSequenceNumberIncrementTooLarge ServiceCodeType = "SequenceNumberIncrementTooLarge" + + // ServiceCodeSnapshotCountExceeded means the snapshot count against this blob has been exceeded. + ServiceCodeSnapshotCountExceeded ServiceCodeType = "SnapshotCountExceeded" + + // ServiceCodeSnaphotOperationRateExceeded means the rate of snapshot operations against this blob has been exceeded. + ServiceCodeSnaphotOperationRateExceeded ServiceCodeType = "SnaphotOperationRateExceeded" + + // ServiceCodeSnapshotsPresent means this operation is not permitted while the blob has snapshots. + ServiceCodeSnapshotsPresent ServiceCodeType = "SnapshotsPresent" + + // ServiceCodeSourceConditionNotMet means the source condition specified using HTTP conditional header(s) is not met. + ServiceCodeSourceConditionNotMet ServiceCodeType = "SourceConditionNotMet" + + // ServiceCodeSystemInUse means this blob is in use by the system. 
+ ServiceCodeSystemInUse ServiceCodeType = "SystemInUse" + + // ServiceCodeTargetConditionNotMet means the target condition specified using HTTP conditional header(s) is not met. + ServiceCodeTargetConditionNotMet ServiceCodeType = "TargetConditionNotMet" + + // ServiceCodeUnauthorizedBlobOverwrite means this request is not authorized to perform blob overwrites. + ServiceCodeUnauthorizedBlobOverwrite ServiceCodeType = "UnauthorizedBlobOverwrite" + + // ServiceCodeBlobBeingRehydrated means this operation is not permitted because the blob is being rehydrated. + ServiceCodeBlobBeingRehydrated ServiceCodeType = "BlobBeingRehydrated" + + // ServiceCodeBlobArchived means this operation is not permitted on an archived blob. + ServiceCodeBlobArchived ServiceCodeType = "BlobArchived" + + // ServiceCodeBlobNotArchived means this blob is currently not in the archived state. + ServiceCodeBlobNotArchived ServiceCodeType = "BlobNotArchived" +) diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go new file mode 100644 index 000000000..b89b18bb4 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go @@ -0,0 +1,8 @@ +package azblob + +// StorageAccountCredential is a wrapper interface for SharedKeyCredential and UserDelegationCredential +type StorageAccountCredential interface { + AccountName() string + ComputeHMACSHA256(message string) (base64String string) + getUDKParams() *UserDelegationKey +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go new file mode 100644 index 000000000..0fdf038ca --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go @@ -0,0 +1,161 @@ +package azblob + +import ( + "context" + "io" + "net/url" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +const ( + // AppendBlobMaxAppendBlockBytes indicates the maximum number of bytes that can be sent in a call to AppendBlock. + AppendBlobMaxAppendBlockBytes = 4 * 1024 * 1024 // 4MB + + // AppendBlobMaxBlocks indicates the maximum number of blocks allowed in an append blob. + AppendBlobMaxBlocks = 50000 +) + +// AppendBlobURL defines a set of operations applicable to append blobs. +type AppendBlobURL struct { + BlobURL + abClient appendBlobClient +} + +// NewAppendBlobURL creates an AppendBlobURL object using the specified URL and request policy pipeline. +func NewAppendBlobURL(url url.URL, p pipeline.Pipeline) AppendBlobURL { + blobClient := newBlobClient(url, p) + abClient := newAppendBlobClient(url, p) + return AppendBlobURL{BlobURL: BlobURL{blobClient: blobClient}, abClient: abClient} +} + +// WithPipeline creates a new AppendBlobURL object identical to the source but with the specific request policy pipeline. +func (ab AppendBlobURL) WithPipeline(p pipeline.Pipeline) AppendBlobURL { + return NewAppendBlobURL(ab.blobClient.URL(), p) +} + +// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. 
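+// For example, abURL.WithSnapshot("2020-01-01T00:00:00.0000000Z") (an illustrative
+// timestamp) addresses that snapshot, and abURL.WithSnapshot("") addresses the base blob again.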
+func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL {
+ p := NewBlobURLParts(ab.URL())
+ p.Snapshot = snapshot
+ return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
+}
+
+// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
+// Pass "" to remove the version ID, returning a URL to the base blob.
+func (ab AppendBlobURL) WithVersionID(versionId string) AppendBlobURL {
+ p := NewBlobURLParts(ab.URL())
+ p.VersionID = versionId
+ return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
+}
+
+func (ab AppendBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
+ return ab.blobClient.GetAccountInfo(ctx)
+}
+
+// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
+func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions, immutability ImmutabilityPolicyOptions) (*AppendBlobCreateResponse, error) {
+ ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
+ blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
+ immutabilityExpiry, immutabilityMode, legalHold := immutability.pointers()
+ return ab.abClient.Create(ctx, 0, nil,
+ &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
+ &h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+ cpk.EncryptionScope, // CPK-N
+ ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
+ nil, // Blob ifTags
+ nil,
+ blobTagsString, // Blob tags
+ // immutability policy
+ immutabilityExpiry, immutabilityMode, legalHold,
+ )
+}
+
+// AppendBlock appends the stream's contents as a new block of data at the end of the existing append blob.
+// This method returns an error if the stream is not at position 0.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
+func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*AppendBlobAppendBlockResponse, error) {
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+ ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers()
+ count, err := validateSeekableStreamAt0AndGetCount(body)
+ if err != nil {
+ return nil, err
+ }
+ return ab.abClient.AppendBlock(ctx, body, count, nil,
+ transactionalMD5,
+ nil, // CRC
+ ac.LeaseAccessConditions.pointers(),
+ ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+ cpk.EncryptionScope, // CPK-N
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil)
+}
+
+// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
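+// The offset/count pair selects the byte range of the source blob to append; e.g.
+// offset=0, count=4*1024*1024 appends the source's first 4 MiB as one block.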
+func (ab AppendBlobURL) AppendBlockFromURL(ctx context.Context, sourceURL url.URL, offset int64, count int64, destinationAccessConditions AppendBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions, sourceAuthorization TokenCredential) (*AppendBlobAppendBlockFromURLResponse, error) {
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers()
+ sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
+ ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := destinationAccessConditions.AppendPositionAccessConditions.pointers()
+ return ab.abClient.AppendBlockFromURL(ctx, sourceURL.String(), 0, httpRange{offset: offset, count: count}.pointers(),
+ transactionalMD5, nil, nil, nil,
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+ cpk.EncryptionScope, // CPK-N
+ destinationAccessConditions.LeaseAccessConditions.pointers(),
+ ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil, tokenCredentialPointers(sourceAuthorization))
+}
+
+type AppendBlobAccessConditions struct {
+ ModifiedAccessConditions
+ LeaseAccessConditions
+ AppendPositionAccessConditions
+}
+
+// AppendPositionAccessConditions identifies append blob-specific access conditions which you optionally set.
+type AppendPositionAccessConditions struct {
+ // IfAppendPositionEqual ensures that the AppendBlock operation succeeds
+ // only if the append position is equal to a value.
+ // IfAppendPositionEqual=0 means no 'IfAppendPositionEqual' header specified.
+ // IfAppendPositionEqual>0 means 'IfAppendPositionEqual' header specified with its value
+ // IfAppendPositionEqual==-1 means 'IfAppendPositionEqual' header specified with a value of 0
+ IfAppendPositionEqual int64
+
+ // IfMaxSizeLessThanOrEqual ensures that the AppendBlock operation succeeds
+ // only if the append blob's size is less than or equal to a value.
+ // IfMaxSizeLessThanOrEqual=0 means no 'IfMaxSizeLessThanOrEqual' header specified.
+ // IfMaxSizeLessThanOrEqual>0 means 'IfMaxSizeLessThanOrEqual' header specified with its value
+ // IfMaxSizeLessThanOrEqual==-1 means 'IfMaxSizeLessThanOrEqual' header specified with a value of 0
+ IfMaxSizeLessThanOrEqual int64
+}
+
+// pointers is for internal infrastructure. It returns the fields as pointers.
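+// The sentinel scheme implemented below: a field of 0 means "omit the header" (nil
+// pointer), -1 means "send the header with value 0", and any other value is sent as-is.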
+func (ac AppendPositionAccessConditions) pointers() (iape *int64, imsltoe *int64) {
+ var zero int64 // defaults to 0
+ switch ac.IfAppendPositionEqual {
+ case -1:
+ iape = &zero
+ case 0:
+ iape = nil
+ default:
+ iape = &ac.IfAppendPositionEqual
+ }
+
+ switch ac.IfMaxSizeLessThanOrEqual {
+ case -1:
+ imsltoe = &zero
+ case 0:
+ imsltoe = nil
+ default:
+ imsltoe = &ac.IfMaxSizeLessThanOrEqual
+ }
+ return
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go
new file mode 100644
index 000000000..301d90825
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go
@@ -0,0 +1,363 @@
+package azblob
+
+import (
+ "context"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
+type BlobURL struct {
+ blobClient blobClient
+}
+
+type BlobTagsMap map[string]string
+
+var DefaultAccessTier = AccessTierNone
+var DefaultPremiumBlobAccessTier = PremiumPageBlobAccessTierNone
+
+// NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline.
+func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL {
+ blobClient := newBlobClient(url, p)
+ return BlobURL{blobClient: blobClient}
+}
+
+// URL returns the URL endpoint used by the BlobURL object.
+func (b BlobURL) URL() url.URL {
+ return b.blobClient.URL()
+}
+
+// String returns the URL as a string.
+func (b BlobURL) String() string {
+ u := b.URL()
+ return u.String()
+}
+
+func (b BlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
+ return b.blobClient.GetAccountInfo(ctx)
+}
+
+// WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline.
+func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL {
+ return NewBlobURL(b.blobClient.URL(), p)
+}
+
+// WithSnapshot creates a new BlobURL object identical to the source but with the specified snapshot timestamp.
+// Pass "" to remove the snapshot, returning a URL to the base blob.
+func (b BlobURL) WithSnapshot(snapshot string) BlobURL {
+ p := NewBlobURLParts(b.URL())
+ p.Snapshot = snapshot
+ return NewBlobURL(p.URL(), b.blobClient.Pipeline())
+}
+
+// WithVersionID creates a new BlobURL object identical to the source but with the specified version id.
+// Pass "" to remove the version ID, returning a URL to the base blob.
+func (b BlobURL) WithVersionID(versionID string) BlobURL {
+ p := NewBlobURLParts(b.URL())
+ p.VersionID = versionID
+ return NewBlobURL(p.URL(), b.blobClient.Pipeline())
+}
+
+// ToAppendBlobURL creates an AppendBlobURL using the source's URL and pipeline.
+func (b BlobURL) ToAppendBlobURL() AppendBlobURL {
+ return NewAppendBlobURL(b.URL(), b.blobClient.Pipeline())
+}
+
+// ToBlockBlobURL creates a BlockBlobURL using the source's URL and pipeline.
+func (b BlobURL) ToBlockBlobURL() BlockBlobURL {
+ return NewBlockBlobURL(b.URL(), b.blobClient.Pipeline())
+}
+
+// ToPageBlobURL creates a PageBlobURL using the source's URL and pipeline.
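+// As with ToAppendBlobURL and ToBlockBlobURL above, this only re-types the URL with
+// the same pipeline; no request is issued and the blob's actual type is not checked.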
+func (b BlobURL) ToPageBlobURL() PageBlobURL {
+ return NewPageBlobURL(b.URL(), b.blobClient.Pipeline())
+}
+
+func SerializeBlobTagsHeader(blobTagsMap BlobTagsMap) *string {
+ if len(blobTagsMap) == 0 {
+ return nil
+ }
+ tags := make([]string, 0)
+ for key, val := range blobTagsMap {
+ tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val))
+ }
+ blobTagsString := strings.Join(tags, "&")
+ return &blobTagsString
+}
+
+func SerializeBlobTags(blobTagsMap BlobTagsMap) BlobTags {
+ if len(blobTagsMap) == 0 {
+ return BlobTags{}
+ }
+ blobTagSet := make([]BlobTag, 0, len(blobTagsMap))
+ for key, val := range blobTagsMap {
+ blobTagSet = append(blobTagSet, BlobTag{Key: key, Value: val})
+ }
+ return BlobTags{BlobTagSet: blobTagSet}
+}
+
+// Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
+// Passing azblob.CountToEnd (0) for count will download the blob from the offset to the end.
+// Note: Snapshot/VersionId are optional parameters which are part of request URL query params.
+// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string)
+// Therefore it is not required to pass these here.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
+func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool, cpk ClientProvidedKeyOptions) (*DownloadResponse, error) {
+ var xRangeGetContentMD5 *bool
+ if rangeGetContentMD5 {
+ xRangeGetContentMD5 = &rangeGetContentMD5
+ }
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+ dr, err := b.blobClient.Download(ctx, nil, nil, nil,
+ httpRange{offset: offset, count: count}.pointers(),
+ ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5, nil,
+ cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil)
+ if err != nil {
+ return nil, err
+ }
+ return &DownloadResponse{
+ b: b,
+ r: dr,
+ ctx: ctx,
+ getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: dr.ETag()},
+ }, err
+}
+
+// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
+// Note 1: deleting a blob also deletes all its snapshots.
+// Note 2: Snapshot/VersionId are optional parameters which are part of request URL query params.
+// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string)
+// Therefore it is not required to pass these here.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
+func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) {
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+ return b.blobClient.Delete(ctx, nil, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil, BlobDeleteNone)
+}
+
+// PermanentDelete permanently deletes soft-deleted snapshots & soft-deleted version blobs and is a dangerous operation and SHOULD NOT BE USED.
+// WARNING: This operation should not be used unless you know exactly the implications. We will not provide support for this API.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
+func (b BlobURL) PermanentDelete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) {
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+ return b.blobClient.Delete(ctx, nil, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
+ ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+ nil, // Blob ifTags
+ nil, BlobDeletePermanent)
+}
+
+// SetTags operation enables users to set tags on a blob or specific blob version, but not a snapshot.
+// Each call to this operation replaces all existing tags attached to the blob.
+// To remove all tags from the blob, call this operation with no tags set.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags
+func (b BlobURL) SetTags(ctx context.Context, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, ifTags *string, blobTagsMap BlobTagsMap) (*BlobSetTagsResponse, error) {
+ tags := SerializeBlobTags(blobTagsMap)
+ return b.blobClient.SetTags(ctx, nil, nil, transactionalContentMD5, transactionalContentCrc64, nil, ifTags, nil, &tags)
+}
+
+// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags
+func (b BlobURL) GetTags(ctx context.Context, ifTags *string) (*BlobTags, error) {
+ return b.blobClient.GetTags(ctx, nil, nil, nil, nil, ifTags, nil)
+}
+
+// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
+func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) {
+ return b.blobClient.Undelete(ctx, nil, nil)
+}
+
+// SetTier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account
+// and on a block blob in a blob storage account (locally redundant storage only).
+// A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob.
+// A block blob's tier determines Hot/Cool/Archive storage type. This operation does not update the blob's ETag.
+// Note: VersionId is an optional parameter which is part of request URL query params.
+// It can be explicitly set by calling WithVersionID(versionID string) function and hence it is not required to pass it here.
+// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
+func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAccessConditions, rehydratePriority RehydratePriorityType) (*BlobSetTierResponse, error) {
+ return b.blobClient.SetTier(ctx, tier, nil,
+ nil, // Blob versioning
+ nil, rehydratePriority, nil, lac.pointers(),
+ nil) // Blob ifTags
+}
+
+// GetProperties returns the blob's properties.
+// Note: Snapshot/VersionId are optional parameters which are part of request URL query params.
+// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string)
+// Therefore it is not required to pass these here.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
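+// A minimal usage sketch (b is an existing BlobURL; zero-value conditions):
+//
+//   props, err := b.GetProperties(ctx, BlobAccessConditions{}, ClientProvidedKeyOptions{})
+//   if err == nil {
+//       size := props.ContentLength()
+//       _ = size
+//   }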
+// GetProperties returns the blob's properties.
+// Note: Snapshot/VersionId are optional parameters which are part of the request URL query params.
+// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string).
+// Therefore it is not required to pass these here.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
+func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobGetPropertiesResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	return b.blobClient.GetProperties(ctx, nil,
+		nil, // Blob versioning
+		nil, ac.LeaseAccessConditions.pointers(),
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// SetHTTPHeaders changes a blob's HTTP headers.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
+func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	return b.blobClient.SetHTTPHeaders(ctx, nil,
+		&h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage,
+		ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		&h.ContentDisposition, nil)
+}
+
+// SetMetadata changes a blob's metadata.
+// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
+func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobSetMetadataResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(),
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+		cpk.EncryptionScope, // CPK-N
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// CreateSnapshot creates a read-only snapshot of a blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
+func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobCreateSnapshotResponse, error) {
+	// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot
+	// query parameter, because checking for this would be a performance hit on a VERY unusual path and the common
+	// case should not suffer that cost.
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	return b.blobClient.CreateSnapshot(ctx, nil, metadata,
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+		cpk.EncryptionScope, // CPK-N
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		ac.LeaseAccessConditions.pointers(), nil)
+}
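A small sketch tying CreateSnapshot to WithSnapshot: the service returns the snapshot's timestamp, which can then be baked into a new BlobURL that addresses the snapshot instead of the base blob. The Snapshot() accessor on the response is assumed from the generated models; the helper is hypothetical.

```go
package main

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// snapshotURL creates a snapshot and returns a URL object addressing it.
func snapshotURL(ctx context.Context, blob azblob.BlobURL) (azblob.BlobURL, error) {
	resp, err := blob.CreateSnapshot(ctx, azblob.Metadata{}, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
	if err != nil {
		return azblob.BlobURL{}, err
	}
	// The returned timestamp selects the snapshot in subsequent requests.
	return blob.WithSnapshot(resp.Snapshot()), nil
}
```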
+// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be
+// between 15 and 60 seconds, or infinite (-1).
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
+func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*BlobAcquireLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
+	return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID,
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// RenewLease renews the blob's previously-acquired lease.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
+func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobRenewLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
+	return b.blobClient.RenewLease(ctx, leaseID, nil,
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// ReleaseLease releases the blob's previously-acquired lease.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
+func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobReleaseLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
+	return b.blobClient.ReleaseLease(ctx, leaseID, nil,
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakNaturally (-1)
+// constant to break a fixed-duration lease when it expires or an infinite lease immediately.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
+func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac ModifiedAccessConditions) (*BlobBreakLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
+	return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds),
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// ChangeLease changes the blob's lease ID.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
+func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*BlobChangeLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
+	return b.blobClient.ChangeLease(ctx, leaseID, proposedID,
+		nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// LeaseBreakNaturally tells ContainerURL's or BlobURL's BreakLease method to break the lease using service semantics.
+const LeaseBreakNaturally = -1
+
+func leasePeriodPointer(period int32) (p *int32) {
+	if period != LeaseBreakNaturally {
+		p = &period
+	}
+	return p
+}
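The sentinel-to-pointer mapping in leasePeriodPointer is easy to get wrong, so this standalone sketch pins down the expected behavior: LeaseBreakNaturally yields a nil pointer (no break-period header is sent, and the service applies its own semantics), while any other period is passed through.

```go
package main

import "fmt"

const LeaseBreakNaturally = -1

// Same shape as leasePeriodPointer above.
func leasePeriodPointer(period int32) (p *int32) {
	if period != LeaseBreakNaturally {
		p = &period
	}
	return p
}

func main() {
	fmt.Println(leasePeriodPointer(LeaseBreakNaturally)) // <nil>
	fmt.Println(*leasePeriodPointer(20))                 // 20
}
```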
+// StartCopyFromURL copies the data at the source URL to a blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
+func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap) (*BlobStartCopyFromURLResponse, error) {
+	srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
+	dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
+	dstLeaseID := dstac.LeaseAccessConditions.pointers()
+	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
+	return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata,
+		tier, RehydratePriorityNone, srcIfModifiedSince, srcIfUnmodifiedSince,
+		srcIfMatchETag, srcIfNoneMatchETag,
+		nil, // source ifTags
+		dstIfModifiedSince, dstIfUnmodifiedSince,
+		dstIfMatchETag, dstIfNoneMatchETag,
+		nil, // Blob ifTags
+		dstLeaseID,
+		nil,
+		blobTagsString, // Blob tags
+		nil,
+		// immutability policy
+		nil, BlobImmutabilityPolicyModeNone, nil,
+	)
+}
+
+// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
+func (b BlobURL) AbortCopyFromURL(ctx context.Context, copyID string, ac LeaseAccessConditions) (*BlobAbortCopyFromURLResponse, error) {
+	return b.blobClient.AbortCopyFromURL(ctx, copyID, nil, ac.pointers(), nil)
+}
+
+// SetImmutabilityPolicy sets a temporary immutability policy with an expiration date. The expiration date must be in the future.
+// While the immutability policy is active, the blob can be read but not modified or deleted.
+// For more information, see https://docs.microsoft.com/en-us/azure/storage/blobs/immutable-time-based-retention-policy-overview (Feature overview)
+// and https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-immutability-policy (REST API reference)
+// A container with object-level immutability enabled is required.
+func (b BlobURL) SetImmutabilityPolicy(ctx context.Context, expiry time.Time, mode BlobImmutabilityPolicyModeType, ifUnmodifiedSince *time.Time) (*BlobSetImmutabilityPolicyResponse, error) {
+	return b.blobClient.SetImmutabilityPolicy(ctx, nil, nil, ifUnmodifiedSince, &expiry, mode)
+}
+
+// DeleteImmutabilityPolicy deletes a temporary immutability policy with an expiration date.
+// While the immutability policy is active, the blob can be read but not modified or deleted.
+// For more information, see https://docs.microsoft.com/en-us/azure/storage/blobs/immutable-time-based-retention-policy-overview (Feature overview)
+// and https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob-immutability-policy (REST API reference)
+// A container with object-level immutability enabled is required.
+func (b BlobURL) DeleteImmutabilityPolicy(ctx context.Context) (*BlobDeleteImmutabilityPolicyResponse, error) {
+	return b.blobClient.DeleteImmutabilityPolicy(ctx, nil, nil)
+}
+
+// SetLegalHold enables a temporary immutability policy that can be applied for general data protection purposes.
+// It stores the current blob version in a WORM (Write-Once Read-Many) state. While in effect, the blob can be read but not modified or deleted.
+// For more information, see https://docs.microsoft.com/en-us/azure/storage/blobs/immutable-legal-hold-overview (Feature overview)
+// and https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-legal-hold (REST API reference)
+// A container with object-level immutability enabled is required.
+func (b BlobURL) SetLegalHold(ctx context.Context, legalHold bool) (*BlobSetLegalHoldResponse, error) {
+	return b.blobClient.SetLegalHold(ctx, legalHold, nil, nil)
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go
new file mode 100644
index 000000000..ae0079e8e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go
@@ -0,0 +1,182 @@
+package azblob
+
+import (
+	"context"
+	"io"
+	"net/url"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+const (
+	// BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
+	BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
+
+	// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
+	BlockBlobMaxStageBlockBytes = 4000 * 1024 * 1024 // 4000MiB
+
+	// BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob.
+	BlockBlobMaxBlocks = 50000
+)
+
+// BlockBlobURL defines a set of operations applicable to block blobs.
+type BlockBlobURL struct {
+	BlobURL
+	bbClient blockBlobClient
+}
+
+// NewBlockBlobURL creates a BlockBlobURL object using the specified URL and request policy pipeline.
+func NewBlockBlobURL(url url.URL, p pipeline.Pipeline) BlockBlobURL {
+	blobClient := newBlobClient(url, p)
+	bbClient := newBlockBlobClient(url, p)
+	return BlockBlobURL{BlobURL: BlobURL{blobClient: blobClient}, bbClient: bbClient}
+}
+
+// WithPipeline creates a new BlockBlobURL object identical to the source but with the specific request policy pipeline.
+func (bb BlockBlobURL) WithPipeline(p pipeline.Pipeline) BlockBlobURL {
+	return NewBlockBlobURL(bb.blobClient.URL(), p)
+}
+
+// WithSnapshot creates a new BlockBlobURL object identical to the source but with the specified snapshot timestamp.
+// Pass "" to remove the snapshot returning a URL to the base blob.
+func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL {
+	p := NewBlobURLParts(bb.URL())
+	p.Snapshot = snapshot
+	return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
+}
+
+// WithVersionID creates a new BlockBlobURL object identical to the source but with the specified version id.
+// Pass "" to remove the version ID returning a URL to the base blob.
+func (bb BlockBlobURL) WithVersionID(versionId string) BlockBlobURL {
+	p := NewBlobURLParts(bb.URL())
+	p.VersionID = versionId
+	return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
+}
+
+func (bb BlockBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
+	return bb.blobClient.GetAccountInfo(ctx)
+}
+
+// Upload creates a new block blob or overwrites an existing block blob.
+// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
+// supported with Upload; the content of the existing blob is overwritten with the new content. To
+// perform a partial update of a block blob, use StageBlock and CommitBlockList.
+// This method panics if the stream is not at position 0.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. +func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions, immutability ImmutabilityPolicyOptions) (*BlockBlobUploadResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + count, err := validateSeekableStreamAt0AndGetCount(body) + blobTagsString := SerializeBlobTagsHeader(blobTagsMap) + immutabilityExpiry, immutabilityMode, legalHold := immutability.pointers() + if err != nil { + return nil, err + } + return bb.bbClient.Upload(ctx, body, count, nil, nil, + &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, + &h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition, + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V + cpk.EncryptionScope, // CPK-N + tier, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + nil, + blobTagsString, // Blob tags + // immutability policy + immutabilityExpiry, immutabilityMode, legalHold, + ) +} + +// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block. +func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*BlockBlobStageBlockResponse, error) { + count, err := validateSeekableStreamAt0AndGetCount(body) + if err != nil { + return nil, err + } + return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, nil, ac.pointers(), + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V + cpk.EncryptionScope, // CPK-N + nil) +} + +// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList. +// If count is CountToEnd (0), then data is read from specified offset to the end. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url. +func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, destinationAccessConditions LeaseAccessConditions, sourceAccessConditions ModifiedAccessConditions, cpk ClientProvidedKeyOptions, sourceAuthorization TokenCredential) (*BlockBlobStageBlockFromURLResponse, error) { + sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers() + return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, nil, + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK + cpk.EncryptionScope, // CPK-N + destinationAccessConditions.pointers(), sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil, tokenCredentialPointers(sourceAuthorization)) +} + +// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob. 
+// In order to be written as part of a blob, a block must have been successfully written
+// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
+// by uploading only those blocks that have changed, then committing the new and existing
+// blocks together. Any blocks not specified in the block list are permanently deleted.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
+func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions, immutability ImmutabilityPolicyOptions) (*BlockBlobCommitBlockListResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
+	immutabilityExpiry, immutabilityMode, legalHold := immutability.pointers()
+	return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
+		&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, nil, nil,
+		metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+		cpk.EncryptionScope, // CPK-N
+		tier,
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil,
+		blobTagsString, // Blob tags
+		// immutability policy
+		immutabilityExpiry, immutabilityMode, legalHold,
+	)
+}
+
+// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
+func (bb BlockBlobURL) GetBlockList(ctx context.Context, listType BlockListType, ac LeaseAccessConditions) (*BlockList, error) {
+	return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(),
+		nil, // Blob ifTags
+		nil)
+}
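A hedged sketch of the StageBlock/CommitBlockList flow described above: each chunk is staged under a unique base64 block ID, and the blob's content is then defined by committing the IDs in order. The chunking scheme and ID format are illustrative, not prescribed by the API.

```go
package main

import (
	"bytes"
	"context"
	"encoding/base64"
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// stageAndCommit uploads chunks as uncommitted blocks, then commits them.
func stageAndCommit(ctx context.Context, bb azblob.BlockBlobURL, chunks [][]byte) error {
	ids := make([]string, 0, len(chunks))
	for i, chunk := range chunks {
		// Block IDs must be base64-encoded and the same length for all blocks.
		id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("block-%08d", i)))
		if _, err := bb.StageBlock(ctx, id, bytes.NewReader(chunk),
			azblob.LeaseAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{}); err != nil {
			return err
		}
		ids = append(ids, id)
	}
	// Only committed blocks become part of the blob; staged-but-uncommitted
	// blocks are eventually discarded by the service.
	_, err := bb.CommitBlockList(ctx, ids, azblob.BlobHTTPHeaders{}, azblob.Metadata{},
		azblob.BlobAccessConditions{}, azblob.AccessTierNone, nil,
		azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{})
	return err
}
```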
+// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
+func (bb BlockBlobURL) CopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, srcContentMD5 []byte, tier AccessTierType, blobTagsMap BlobTagsMap, immutability ImmutabilityPolicyOptions, sourceAuthorization TokenCredential) (*BlobCopyFromURLResponse, error) {
+	srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
+	dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
+	dstLeaseID := dstac.LeaseAccessConditions.pointers()
+	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
+	immutabilityExpiry, immutabilityMode, legalHold := immutability.pointers()
+	return bb.blobClient.CopyFromURL(ctx, source.String(), nil, metadata, tier,
+		srcIfModifiedSince, srcIfUnmodifiedSince,
+		srcIfMatchETag, srcIfNoneMatchETag,
+		dstIfModifiedSince, dstIfUnmodifiedSince,
+		dstIfMatchETag, dstIfNoneMatchETag,
+		nil, // Blob ifTags
+		dstLeaseID, nil, srcContentMD5,
+		blobTagsString, // Blob tags
+		// immutability policy
+		immutabilityExpiry, immutabilityMode, legalHold, tokenCredentialPointers(sourceAuthorization))
+}
+
+// PutBlobFromURL synchronously creates a new Block Blob with data from the source URL up to a max length of 256MB.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob-from-url.
+func (bb BlockBlobURL) PutBlobFromURL(ctx context.Context, h BlobHTTPHeaders, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, srcContentMD5 []byte, dstContentMD5 []byte, tier AccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions, sourceAuthorization TokenCredential) (*BlockBlobPutBlobFromURLResponse, error) {
+	srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
+	dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
+	dstLeaseID := dstac.LeaseAccessConditions.pointers()
+	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
+
+	return bb.bbClient.PutBlobFromURL(ctx, 0, source.String(), nil, nil,
+		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, dstContentMD5, &h.CacheControl,
+		metadata, dstLeaseID, &h.ContentDisposition, cpk.EncryptionKey, cpk.EncryptionKeySha256,
+		cpk.EncryptionAlgorithm, cpk.EncryptionScope, tier, dstIfModifiedSince, dstIfUnmodifiedSince,
+		dstIfMatchETag, dstIfNoneMatchETag, nil, srcIfModifiedSince, srcIfUnmodifiedSince,
+		srcIfMatchETag, srcIfNoneMatchETag, nil, nil, srcContentMD5, blobTagsString, nil, tokenCredentialPointers(sourceAuthorization))
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go
new file mode 100644
index 000000000..8fd786197
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go
@@ -0,0 +1,319 @@
+package azblob
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"net/url"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// A ContainerURL represents a URL to the Azure Storage container allowing you to manipulate its blobs.
+type ContainerURL struct {
+	client containerClient
+}
+
+// NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline.
+func NewContainerURL(url url.URL, p pipeline.Pipeline) ContainerURL { + client := newContainerClient(url, p) + return ContainerURL{client: client} +} + +// URL returns the URL endpoint used by the ContainerURL object. +func (c ContainerURL) URL() url.URL { + return c.client.URL() +} + +// String returns the URL as a string. +func (c ContainerURL) String() string { + u := c.URL() + return u.String() +} + +func (c ContainerURL) GetAccountInfo(ctx context.Context) (*ContainerGetAccountInfoResponse, error) { + return c.client.GetAccountInfo(ctx) +} + +// WithPipeline creates a new ContainerURL object identical to the source but with the specified request policy pipeline. +func (c ContainerURL) WithPipeline(p pipeline.Pipeline) ContainerURL { + return NewContainerURL(c.URL(), p) +} + +// NewBlobURL creates a new BlobURL object by concatenating blobName to the end of +// ContainerURL's URL. The new BlobURL uses the same request policy pipeline as the ContainerURL. +// To change the pipeline, create the BlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewBlobURL instead of calling this object's +// NewBlobURL method. +func (c ContainerURL) NewBlobURL(blobName string) BlobURL { + blobURL := appendToURLPath(c.URL(), blobName) + return NewBlobURL(blobURL, c.client.Pipeline()) +} + +// NewAppendBlobURL creates a new AppendBlobURL object by concatenating blobName to the end of +// ContainerURL's URL. The new AppendBlobURL uses the same request policy pipeline as the ContainerURL. +// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewAppendBlobURL instead of calling this object's +// NewAppendBlobURL method. +func (c ContainerURL) NewAppendBlobURL(blobName string) AppendBlobURL { + blobURL := appendToURLPath(c.URL(), blobName) + return NewAppendBlobURL(blobURL, c.client.Pipeline()) +} + +// NewBlockBlobURL creates a new BlockBlobURL object by concatenating blobName to the end of +// ContainerURL's URL. The new BlockBlobURL uses the same request policy pipeline as the ContainerURL. +// To change the pipeline, create the BlockBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewBlockBlobURL instead of calling this object's +// NewBlockBlobURL method. +func (c ContainerURL) NewBlockBlobURL(blobName string) BlockBlobURL { + blobURL := appendToURLPath(c.URL(), blobName) + return NewBlockBlobURL(blobURL, c.client.Pipeline()) +} + +// NewPageBlobURL creates a new PageBlobURL object by concatenating blobName to the end of +// ContainerURL's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerURL. +// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewPageBlobURL instead of calling this object's +// NewPageBlobURL method. +func (c ContainerURL) NewPageBlobURL(blobName string) PageBlobURL { + blobURL := appendToURLPath(c.URL(), blobName) + return NewPageBlobURL(blobURL, c.client.Pipeline()) +} + +// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container. 
+func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAccessType PublicAccessType) (*ContainerCreateResponse, error) {
+	return c.client.Create(ctx, nil, metadata, publicAccessType, nil,
+		nil, nil, // container encryption
+	)
+}
+
+// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
+func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainerDeleteResponse, error) {
+	if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
+		return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
+	}
+
+	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
+	return c.client.Delete(ctx, nil, ac.LeaseAccessConditions.pointers(),
+		ifModifiedSince, ifUnmodifiedSince, nil)
+}
+
+// GetProperties returns the container's properties.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata.
+func (c ContainerURL) GetProperties(ctx context.Context, ac LeaseAccessConditions) (*ContainerGetPropertiesResponse, error) {
+	// NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties.
+	// This allows us to not expose a GetMetadata method at all, simplifying the API.
+	return c.client.GetProperties(ctx, nil, ac.pointers(), nil)
+}
+
+// SetMetadata sets the container's metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
+func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainerSetMetadataResponse, error) {
+	if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
+		return nil, errors.New("the IfUnmodifiedSince, IfMatch, and IfNoneMatch access conditions must have their default values because they are ignored by the blob service")
+	}
+	ifModifiedSince, _, _, _ := ac.ModifiedAccessConditions.pointers()
+	return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil)
+}
+
+// GetAccessPolicy returns the container's access policy. The access policy indicates whether the container's blobs may be accessed publicly.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl.
+func (c ContainerURL) GetAccessPolicy(ctx context.Context, ac LeaseAccessConditions) (*SignedIdentifiers, error) {
+	return c.client.GetAccessPolicy(ctx, nil, ac.pointers(), nil)
+}
+
+// The AccessPolicyPermission type simplifies creating the permissions string for a container's access policy.
+// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field.
+type AccessPolicyPermission struct {
+	Read, Add, Create, Write, Delete, List bool
+}
+
+// String produces the access policy permission string for an Azure Storage container.
+// Call this method to set AccessPolicy's Permission field.
+func (p AccessPolicyPermission) String() string {
+	var b bytes.Buffer
+	if p.Read {
+		b.WriteRune('r')
+	}
+	if p.Add {
+		b.WriteRune('a')
+	}
+	if p.Create {
+		b.WriteRune('c')
+	}
+	if p.Write {
+		b.WriteRune('w')
+	}
+	if p.Delete {
+		b.WriteRune('d')
+	}
+	if p.List {
+		b.WriteRune('l')
+	}
+	return b.String()
+}
+
+// Parse initializes the AccessPolicyPermission's fields from a string.
+func (p *AccessPolicyPermission) Parse(s string) error {
+	*p = AccessPolicyPermission{} // Clear the flags
+	for _, r := range s {
+		switch r {
+		case 'r':
+			p.Read = true
+		case 'a':
+			p.Add = true
+		case 'c':
+			p.Create = true
+		case 'w':
+			p.Write = true
+		case 'd':
+			p.Delete = true
+		case 'l':
+			p.List = true
+		default:
+			return fmt.Errorf("invalid permission: '%v'", r)
+		}
+	}
+	return nil
+}
+
+// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl.
+func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAccessType, si []SignedIdentifier,
+	ac ContainerAccessConditions) (*ContainerSetAccessPolicyResponse, error) {
+	if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
+		return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
+	}
+	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
+	return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(),
+		accessType, ifModifiedSince, ifUnmodifiedSince, nil)
+}
+
+// AcquireLease acquires a lease on the container for delete operations. The lease duration must be between 15 and 60 seconds, or infinite (-1).
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
+func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*ContainerAcquireLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
+	return c.client.AcquireLease(ctx, nil, &duration, &proposedID,
+		ifModifiedSince, ifUnmodifiedSince, nil)
+}
+
+// RenewLease renews the container's previously-acquired lease.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
+func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerRenewLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
+	return c.client.RenewLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
+}
+
+// ReleaseLease releases the container's previously-acquired lease.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
+func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerReleaseLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
+	return c.client.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
+}
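A hedged sketch of the container lease lifecycle just shown (acquire, work, release). The LeaseID() accessor on the acquire response is assumed from the generated models; the helper itself is hypothetical.

```go
package main

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// withContainerLease runs work while holding a 60-second lease on c.
func withContainerLease(ctx context.Context, c azblob.ContainerURL, proposedID string, work func() error) error {
	acq, err := c.AcquireLease(ctx, proposedID, 60, azblob.ModifiedAccessConditions{})
	if err != nil {
		return err
	}
	// Release the lease even if the work fails.
	defer c.ReleaseLease(ctx, acq.LeaseID(), azblob.ModifiedAccessConditions{})
	return work()
}
```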
+// BreakLease breaks the container's previously-acquired lease (if it exists).
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
+func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac ModifiedAccessConditions) (*ContainerBreakLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
+	return c.client.BreakLease(ctx, nil, leasePeriodPointer(period), ifModifiedSince, ifUnmodifiedSince, nil)
+}
+
+// ChangeLease changes the container's lease ID.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
+func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*ContainerChangeLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
+	return c.client.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
+}
+
+// ListBlobsFlatSegment returns a single segment of blobs starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
+// After getting a segment, process it, and then call ListBlobsFlatSegment again (passing the
+// previously-returned Marker) to get the next segment.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
+func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatSegmentResponse, error) {
+	prefix, include, maxResults := o.pointers()
+	return c.client.ListBlobFlatSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
+}
+
+// ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
+// After getting a segment, process it, and then call ListBlobsHierarchySegment again (passing the
+// previously-returned Marker) to get the next segment.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
+func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Marker, delimiter string, o ListBlobsSegmentOptions) (*ListBlobsHierarchySegmentResponse, error) {
+	if o.Details.Snapshots {
+		return nil, errors.New("snapshots are not supported in this listing operation")
+	}
+	prefix, include, maxResults := o.pointers()
+	return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.Val, maxResults, include, nil, nil)
+}
+
+// ListBlobsSegmentOptions defines options available when calling ListBlobs.
+type ListBlobsSegmentOptions struct {
+	Details BlobListingDetails // No IncludeType header is produced if ""
+	Prefix  string             // No Prefix header is produced if ""
+
+	// MaxResults sets the maximum desired results you want the service to return. Note, the
+	// service may return fewer results than requested.
+	// MaxResults=0 means no 'MaxResults' header specified.
+	MaxResults int32
+}
+
+func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlobsIncludeItemType, maxResults *int32) {
+	if o.Prefix != "" {
+		prefix = &o.Prefix
+	}
+	include = o.Details.slice()
+	if o.MaxResults != 0 {
+		maxResults = &o.MaxResults
+	}
+	return
+}
+
+// BlobListingDetails indicates what additional information the service should return with each blob.
+type BlobListingDetails struct {
+	Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions, Permissions, LegalHold, ImmutabilityPolicy, DeletedWithVersions bool
+}
+
+// slice produces the Include query parameter's value.
+func (d *BlobListingDetails) slice() []ListBlobsIncludeItemType {
+	items := []ListBlobsIncludeItemType{}
+	// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
+	if d.Copy {
+		items = append(items, ListBlobsIncludeItemCopy)
+	}
+	if d.Deleted {
+		items = append(items, ListBlobsIncludeItemDeleted)
+	}
+	if d.DeletedWithVersions {
+		items = append(items, ListBlobsIncludeItemDeletedwithversions)
+	}
+	if d.ImmutabilityPolicy {
+		items = append(items, ListBlobsIncludeItemImmutabilitypolicy)
+	}
+	if d.LegalHold {
+		items = append(items, ListBlobsIncludeItemLegalhold)
+	}
+	if d.Metadata {
+		items = append(items, ListBlobsIncludeItemMetadata)
+	}
+	if d.Permissions {
+		items = append(items, ListBlobsIncludeItemPermissions)
+	}
+	if d.Snapshots {
+		items = append(items, ListBlobsIncludeItemSnapshots)
+	}
+	if d.UncommittedBlobs {
+		items = append(items, ListBlobsIncludeItemUncommittedblobs)
+	}
+	if d.Tags {
+		items = append(items, ListBlobsIncludeItemTags)
+	}
+	if d.Versions {
+		items = append(items, ListBlobsIncludeItemVersions)
+	}
+	return items
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go
new file mode 100644
index 000000000..dc57765b4
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go
@@ -0,0 +1,276 @@
+package azblob
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/url"
+	"strconv"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+const (
+	// PageBlobPageBytes indicates the number of bytes in a page (512).
+	PageBlobPageBytes = 512
+
+	// PageBlobMaxUploadPagesBytes indicates the maximum number of bytes that can be sent in a call to PutPage.
+	PageBlobMaxUploadPagesBytes = 4 * 1024 * 1024 // 4MB
+)
+
+// PageBlobURL defines a set of operations applicable to page blobs.
+type PageBlobURL struct {
+	BlobURL
+	pbClient pageBlobClient
+}
+
+// NewPageBlobURL creates a PageBlobURL object using the specified URL and request policy pipeline.
+func NewPageBlobURL(url url.URL, p pipeline.Pipeline) PageBlobURL {
+	blobClient := newBlobClient(url, p)
+	pbClient := newPageBlobClient(url, p)
+	return PageBlobURL{BlobURL: BlobURL{blobClient: blobClient}, pbClient: pbClient}
+}
+
+// WithPipeline creates a new PageBlobURL object identical to the source but with the specific request policy pipeline.
+func (pb PageBlobURL) WithPipeline(p pipeline.Pipeline) PageBlobURL {
+	return NewPageBlobURL(pb.blobClient.URL(), p)
+}
+
+// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp.
+// Pass "" to remove the snapshot returning a URL to the base blob.
+func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL {
+	p := NewBlobURLParts(pb.URL())
+	p.Snapshot = snapshot
+	return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
+}
+
+// WithVersionID creates a new PageBlobURL object identical to the source but with the specified version id.
+// Pass "" to remove the version ID returning a URL to the base blob.
+func (pb PageBlobURL) WithVersionID(versionId string) PageBlobURL {
+	p := NewBlobURLParts(pb.URL())
+	p.VersionID = versionId
+	return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
+}
+
+func (pb PageBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
+	return pb.blobClient.GetAccountInfo(ctx)
+}
+// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
+func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier PremiumPageBlobAccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions, immutability ImmutabilityPolicyOptions) (*PageBlobCreateResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
+	immutabilityExpiry, immutabilityMode, legalHold := immutability.pointers()
+	return pb.pbClient.Create(ctx, 0, size, nil, tier,
+		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
+		metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+		cpk.EncryptionScope, // CPK-N
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		&sequenceNumber, nil,
+		blobTagsString, // Blob tags
+		// immutability policy
+		immutabilityExpiry, immutabilityMode, legalHold,
+	)
+}
+
+// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
+// This method panics if the stream is not at position 0.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
+func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac PageBlobAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*PageBlobUploadPagesResponse, error) {
+	count, err := validateSeekableStreamAt0AndGetCount(body)
+	if err != nil {
+		return nil, err
+	}
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
+	return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil, nil,
+		PageRange{Start: offset, End: offset + count - 1}.pointers(),
+		ac.LeaseAccessConditions.pointers(),
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+		cpk.EncryptionScope, // CPK-N
+		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
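A hedged sketch of the 512-byte alignment rules above: the blob is created at a page-aligned size and a single page is written at offset 0. The PremiumPageBlobAccessTierNone constant is assumed from the generated models; the helper itself is hypothetical.

```go
package main

import (
	"bytes"
	"context"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// createAndFillFirstPage creates a 4-page (2 KiB) page blob and zero-fills page 0.
func createAndFillFirstPage(ctx context.Context, pb azblob.PageBlobURL) error {
	size := int64(4 * azblob.PageBlobPageBytes)
	if _, err := pb.Create(ctx, size, 0, azblob.BlobHTTPHeaders{}, azblob.Metadata{},
		azblob.BlobAccessConditions{}, azblob.PremiumPageBlobAccessTierNone, nil,
		azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}); err != nil {
		return err
	}
	page := make([]byte, azblob.PageBlobPageBytes) // exactly one 512-byte page
	_, err := pb.UploadPages(ctx, 0, bytes.NewReader(page),
		azblob.PageBlobAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{})
	return err
}
```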
+// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
+// The sourceOffset specifies the start offset of the source data to copy from.
+// The destOffset specifies the start offset in the page blob that the data will be written to.
+// The count must be a multiple of 512 bytes.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
+func (pb PageBlobURL) UploadPagesFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64, count int64, transactionalMD5 []byte, destinationAccessConditions PageBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, cpk ClientProvidedKeyOptions, sourceAuthorization TokenCredential) (*PageBlobUploadPagesFromURLResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers()
+	sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
+	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := destinationAccessConditions.SequenceNumberAccessConditions.pointers()
+	return pb.pbClient.UploadPagesFromURL(ctx, sourceURL.String(), *PageRange{Start: sourceOffset, End: sourceOffset + count - 1}.pointers(), 0,
+		*PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(), transactionalMD5, nil, nil,
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+		cpk.EncryptionScope, // CPK-N
+		destinationAccessConditions.LeaseAccessConditions.pointers(),
+		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil, tokenCredentialPointers(sourceAuthorization))
+}
+
+// ClearPages frees the specified pages from the page blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
+func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions, cpk ClientProvidedKeyOptions) (*PageBlobClearPagesResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
+	return pb.pbClient.ClearPages(ctx, 0, nil,
+		PageRange{Start: offset, End: offset + count - 1}.pointers(),
+		ac.LeaseAccessConditions.pointers(),
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+		cpk.EncryptionScope, // CPK-N
+		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan,
+		ifSequenceNumberEqual, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil, nil)
+}
+
+// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
+func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageList, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	return pb.pbClient.GetPageRanges(ctx, nil, nil,
+		httpRange{offset: offset, count: count}.pointers(),
+		ac.LeaseAccessConditions.pointers(),
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// GetManagedDiskPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob, representing a managed disk.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
+func (pb PageBlobURL) GetManagedDiskPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot *string, prevSnapshotURL *string, ac BlobAccessConditions) (*PageList, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+
+	return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, prevSnapshot,
+		prevSnapshotURL, // Get managed disk diff
+		httpRange{offset: offset, count: count}.pointers(),
+		ac.LeaseAccessConditions.pointers(),
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
+func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot,
+		nil, // Get managed disk diff
+		httpRange{offset: offset, count: count}.pointers(),
+		ac.LeaseAccessConditions.pointers(),
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// Resize resizes the page blob to the specified size (which must be a multiple of 512).
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
+func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*PageBlobResizeResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(),
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+		cpk.EncryptionScope, // CPK-N
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil, nil)
+}
+
+// UpdateSequenceNumber sets the page blob's sequence number.
+func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64,
+	ac BlobAccessConditions) (*PageBlobUpdateSequenceNumberResponse, error) {
+	sn := &sequenceNumber
+	if action == SequenceNumberActionIncrement {
+		sn = nil
+	}
+	ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
+	return pb.pbClient.UpdateSequenceNumber(ctx, action, nil,
+		ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
+		nil, sn, nil)
+}
+
+// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
+// The snapshot is copied such that only the differential changes since the previously copied snapshot are transferred to the destination.
+// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
+// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
+func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL, snapshot string, ac BlobAccessConditions) (*PageBlobCopyIncrementalResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + qp := source.Query() + qp.Set("snapshot", snapshot) + source.RawQuery = qp.Encode() + return pb.pbClient.CopyIncremental(ctx, source.String(), nil, + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil, nil) +} + +func (pr PageRange) pointers() *string { + endOffset := strconv.FormatInt(int64(pr.End), 10) + asString := fmt.Sprintf("bytes=%v-%s", pr.Start, endOffset) + return &asString +} + +type PageBlobAccessConditions struct { + ModifiedAccessConditions + LeaseAccessConditions + SequenceNumberAccessConditions +} + +// SequenceNumberAccessConditions identifies page blob-specific access conditions which you optionally set. +type SequenceNumberAccessConditions struct { + // IfSequenceNumberLessThan ensures that the page blob operation succeeds + // only if the blob's sequence number is less than a value. + // IfSequenceNumberLessThan=0 means no 'IfSequenceNumberLessThan' header specified. + // IfSequenceNumberLessThan>0 means 'IfSequenceNumberLessThan' header specified with its value + // IfSequenceNumberLessThan==-1 means 'IfSequenceNumberLessThan' header specified with a value of 0 + IfSequenceNumberLessThan int64 + + // IfSequenceNumberLessThanOrEqual ensures that the page blob operation succeeds + // only if the blob's sequence number is less than or equal to a value. + // IfSequenceNumberLessThanOrEqual=0 means no 'IfSequenceNumberLessThanOrEqual' header specified. + // IfSequenceNumberLessThanOrEqual>0 means 'IfSequenceNumberLessThanOrEqual' header specified with its value + // IfSequenceNumberLessThanOrEqual=-1 means 'IfSequenceNumberLessThanOrEqual' header specified with a value of 0 + IfSequenceNumberLessThanOrEqual int64 + + // IfSequenceNumberEqual ensures that the page blob operation succeeds + // only if the blob's sequence number is equal to a value. + // IfSequenceNumberEqual=0 means no 'IfSequenceNumberEqual' header specified. + // IfSequenceNumberEqual>0 means 'IfSequenceNumberEqual' header specified with its value + // IfSequenceNumberEqual=-1 means 'IfSequenceNumberEqual' header specified with a value of 0 + IfSequenceNumberEqual int64 +} + +// pointers is for internal infrastructure. It returns the fields as pointers. 
+func (ac SequenceNumberAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) { + var zero int64 // Defaults to 0 + switch ac.IfSequenceNumberLessThan { + case -1: + snlt = &zero + case 0: + snlt = nil + default: + snlt = &ac.IfSequenceNumberLessThan + } + + switch ac.IfSequenceNumberLessThanOrEqual { + case -1: + snltoe = &zero + case 0: + snltoe = nil + default: + snltoe = &ac.IfSequenceNumberLessThanOrEqual + } + switch ac.IfSequenceNumberEqual { + case -1: + sne = &zero + case 0: + sne = nil + default: + sne = &ac.IfSequenceNumberEqual + } + return +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go new file mode 100644 index 000000000..ce3ac97dc --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go @@ -0,0 +1,177 @@ +package azblob + +import ( + "context" + "net/url" + "strings" + "time" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +const ( + // ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container. + ContainerNameRoot = "$root" + + // ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container. + ContainerNameLogs = "$logs" +) + +// A ServiceURL represents a URL to the Azure Storage Blob service allowing you to manipulate blob containers. +type ServiceURL struct { + client serviceClient +} + +// NewServiceURL creates a ServiceURL object using the specified URL and request policy pipeline. +func NewServiceURL(primaryURL url.URL, p pipeline.Pipeline) ServiceURL { + client := newServiceClient(primaryURL, p) + return ServiceURL{client: client} +} + +//GetUserDelegationCredential obtains a UserDelegationKey object using the base ServiceURL object. +//OAuth is required for this call, as well as any role that can delegate access to the storage account. +func (s ServiceURL) GetUserDelegationCredential(ctx context.Context, info KeyInfo, timeout *int32, requestID *string) (UserDelegationCredential, error) { + sc := newServiceClient(s.client.url, s.client.p) + udk, err := sc.GetUserDelegationKey(ctx, info, timeout, requestID) + if err != nil { + return UserDelegationCredential{}, err + } + return NewUserDelegationCredential(strings.Split(s.client.url.Host, ".")[0], *udk), nil +} + +//TODO this was supposed to be generated +//NewKeyInfo creates a new KeyInfo struct with the correct time formatting & conversion +func NewKeyInfo(Start, Expiry time.Time) KeyInfo { + return KeyInfo{ + Start: Start.UTC().Format(SASTimeFormat), + Expiry: Expiry.UTC().Format(SASTimeFormat), + } +} + +func (s ServiceURL) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) { + return s.client.GetAccountInfo(ctx) +} + +// URL returns the URL endpoint used by the ServiceURL object. +func (s ServiceURL) URL() url.URL { + return s.client.URL() +} + +// String returns the URL as a string. +func (s ServiceURL) String() string { + u := s.URL() + return u.String() +} + +// WithPipeline creates a new ServiceURL object identical to the source but with the specified request policy pipeline. +func (s ServiceURL) WithPipeline(p pipeline.Pipeline) ServiceURL { + return NewServiceURL(s.URL(), p) +} + +// NewContainerURL creates a new ContainerURL object by concatenating containerName to the end of +// ServiceURL's URL. The new ContainerURL uses the same request policy pipeline as the ServiceURL. 
+// To change the pipeline, create the ContainerURL and then call its WithPipeline method passing in the
+// desired pipeline object. Or, call this package's NewContainerURL instead of calling this object's
+// NewContainerURL method.
+func (s ServiceURL) NewContainerURL(containerName string) ContainerURL {
+	containerURL := appendToURLPath(s.URL(), containerName)
+	return NewContainerURL(containerURL, s.client.Pipeline())
+}
+
+// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required)
+func appendToURLPath(u url.URL, name string) url.URL {
+	// e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f"
+	// When you call url.Parse() this is what you'll get:
+	//     Scheme: "https"
+	//     Opaque: ""
+	//       User: nil
+	//       Host: "ms.com"
+	//       Path: "/a/b/"	This should start with a / and it might or might not have a trailing slash
+	//    RawPath: ""
+	// ForceQuery: false
+	//   RawQuery: "k1=v1&k2=v2"
+	//   Fragment: "f"
+	if len(u.Path) == 0 || u.Path[len(u.Path)-1] != '/' {
+		u.Path += "/" // Append "/" to end before appending name
+	}
+	u.Path += name
+	return u
+}
+
+// ListContainersSegment returns a single segment of containers starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
+// After getting a segment, process it, and then call ListContainersSegment again (passing the
+// previously-returned Marker) to get the next segment. For more information, see
+// https://docs.microsoft.com/rest/api/storageservices/list-containers2.
+func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersSegmentResponse, error) {
+	prefix, include, maxResults := o.pointers()
+	return s.client.ListContainersSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
+}
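A hedged sketch of the Marker-driven pagination described above; the NotDone, NextMarker, and ContainerItems names are assumed from the generated models elsewhere in the package.

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// listAllContainers walks every segment until the service stops returning a marker.
func listAllContainers(ctx context.Context, svc azblob.ServiceURL) error {
	for marker := (azblob.Marker{}); marker.NotDone(); {
		resp, err := svc.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{})
		if err != nil {
			return err
		}
		for _, item := range resp.ContainerItems {
			fmt.Println(item.Name)
		}
		marker = resp.NextMarker // empty on the final segment, which ends the loop
	}
	return nil
}
```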
+	if d.Metadata {
+		items = append(items, string(ListContainersIncludeMetadata))
+	}
+	// if d.Deleted {
+	// 	items = append(items, string(ListContainersIncludeDeleted))
+	// }
+	if len(items) > 0 {
+		return strings.Join(items, ",")
+	}
+	return string(ListContainersIncludeNone)
+}
+
+func (bsu ServiceURL) GetProperties(ctx context.Context) (*StorageServiceProperties, error) {
+	return bsu.client.GetProperties(ctx, nil, nil)
+}
+
+func (bsu ServiceURL) SetProperties(ctx context.Context, properties StorageServiceProperties) (*ServiceSetPropertiesResponse, error) {
+	return bsu.client.SetProperties(ctx, properties, nil, nil)
+}
+
+func (bsu ServiceURL) GetStatistics(ctx context.Context) (*StorageServiceStats, error) {
+	return bsu.client.GetStatistics(ctx, nil, nil)
+}
+
+// FindBlobsByTags operation finds all blobs in the storage account whose tags match a given search expression.
+// Filter blobs searches across all containers within a storage account but can be scoped within the expression to a single container.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags
+// e.g. "dog='germanshepherd' and penguin='emperorpenguin'"
+// To specify a container, e.g. "@container='containerName' and Name = 'C'"
+func (bsu ServiceURL) FindBlobsByTags(ctx context.Context, timeout *int32, requestID *string, where *string, marker Marker, maxResults *int32) (*FilterBlobSegment, error) {
+	return bsu.client.FilterBlobs(ctx, timeout, requestID, where, marker.Val, maxResults)
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go
new file mode 100644
index 000000000..9fcbbc409
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go
@@ -0,0 +1,38 @@
+package azblob
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+)
+
+// NewUserDelegationCredential creates a new UserDelegationCredential using a Storage account's name and a user delegation key from it
+func NewUserDelegationCredential(accountName string, key UserDelegationKey) UserDelegationCredential {
+	return UserDelegationCredential{
+		accountName: accountName,
+		accountKey:  key,
+	}
+}
+
+type UserDelegationCredential struct {
+	accountName string
+	accountKey  UserDelegationKey
+}
+
+// AccountName returns the Storage account's name
+func (f UserDelegationCredential) AccountName() string {
+	return f.accountName
+}
+
+// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS using the user delegation key.
+func (f UserDelegationCredential) ComputeHMACSHA256(message string) (base64String string) {
+	bytes, _ := base64.StdEncoding.DecodeString(f.accountKey.Value)
+	h := hmac.New(sha256.New, bytes)
+	h.Write([]byte(message))
+	return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+// getUDKParams is a private method to return important parameters for NewSASQueryParameters
+func (f UserDelegationCredential) getUDKParams() *UserDelegationKey {
+	return &f.accountKey
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go
new file mode 100644
index 000000000..d89ccb09c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go
@@ -0,0 +1,3 @@
+package azblob
+
+const serviceLibVersion = "0.15"
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go
new file mode 100644
index 000000000..a81987d54
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go
@@ -0,0 +1,55 @@
+package azblob
+
+import (
+	"context"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// Credential represents any credential type; it is used to create a credential policy Factory.
+type Credential interface {
+	pipeline.Factory
+	credentialMarker()
+}
+
+type credentialFunc pipeline.FactoryFunc
+
+func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+	return f(next, po)
+}
+
+// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+func (credentialFunc) credentialMarker() {}
+
+//////////////////////////////
+
+// NewAnonymousCredential creates an anonymous credential for use with HTTP(S) requests that read public resources
+// or for use with Shared Access Signatures (SAS).
+func NewAnonymousCredential() Credential {
+	return anonymousCredentialFactory
+}
+
+var anonymousCredentialFactory Credential = &anonymousCredentialPolicyFactory{} // Singleton
+
+// anonymousCredentialPolicyFactory is the credential's policy factory.
+type anonymousCredentialPolicyFactory struct {
+}
+
+// New creates a credential policy object.
+func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+	return &anonymousCredentialPolicy{next: next}
+}
+
+// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+func (*anonymousCredentialPolicyFactory) credentialMarker() {}
+
+// anonymousCredentialPolicy is the credential's policy object.
+type anonymousCredentialPolicy struct {
+	next pipeline.Policy
+}
+
+// Do implements the credential's policy interface.
+func (p anonymousCredentialPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+	// For anonymous credentials, this is effectively a no-op
+	return p.next.Do(ctx, request)
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go
new file mode 100644
index 000000000..cc59cbbed
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go
@@ -0,0 +1,205 @@
+package azblob
+
+import (
+	"bytes"
+	"context"
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+	"errors"
+	"net/http"
+	"net/url"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
+// storage account's name and either its primary or secondary key.
+func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
+	bytes, err := base64.StdEncoding.DecodeString(accountKey)
+	if err != nil {
+		return &SharedKeyCredential{}, err
+	}
+	return &SharedKeyCredential{accountName: accountName, accountKey: bytes}, nil
+}
+
+// SharedKeyCredential contains an account's name and its primary or secondary key.
+// It is immutable, making it shareable and goroutine-safe.
+type SharedKeyCredential struct {
+	// Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only
+	accountName string
+	accountKey  []byte
+}
+
+// AccountName returns the Storage account's name.
+func (f SharedKeyCredential) AccountName() string { + return f.accountName +} + +func (f SharedKeyCredential) getAccountKey() []byte { + return f.accountKey +} + +// noop function to satisfy StorageAccountCredential interface +func (f SharedKeyCredential) getUDKParams() *UserDelegationKey { + return nil +} + +// New creates a credential policy object. +func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { + return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + // Add a x-ms-date header if it doesn't already exist + if d := request.Header.Get(headerXmsDate); d == "" { + request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)} + } + stringToSign, err := f.buildStringToSign(request) + if err != nil { + return nil, err + } + signature := f.ComputeHMACSHA256(stringToSign) + authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "") + request.Header[headerAuthorization] = []string{authHeader} + + response, err := next.Do(ctx, request) + if err != nil && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusForbidden { + // Service failed to authenticate request, log it + po.Log(pipeline.LogError, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n") + } + return response, err + }) +} + +// credentialMarker is a package-internal method that exists just to satisfy the Credential interface. +func (*SharedKeyCredential) credentialMarker() {} + +// Constants ensuring that header names are correctly spelled and consistently cased. +const ( + headerAuthorization = "Authorization" + headerCacheControl = "Cache-Control" + headerContentEncoding = "Content-Encoding" + headerContentDisposition = "Content-Disposition" + headerContentLanguage = "Content-Language" + headerContentLength = "Content-Length" + headerContentMD5 = "Content-MD5" + headerContentType = "Content-Type" + headerDate = "Date" + headerIfMatch = "If-Match" + headerIfModifiedSince = "If-Modified-Since" + headerIfNoneMatch = "If-None-Match" + headerIfUnmodifiedSince = "If-Unmodified-Since" + headerRange = "Range" + headerUserAgent = "User-Agent" + headerXmsDate = "x-ms-date" + headerXmsVersion = "x-ms-version" +) + +// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. 
+func (f SharedKeyCredential) ComputeHMACSHA256(message string) (base64String string) { + h := hmac.New(sha256.New, f.accountKey) + h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + headers := request.Header + contentLength := headers.Get(headerContentLength) + if contentLength == "0" { + contentLength = "" + } + + canonicalizedResource, err := f.buildCanonicalizedResource(request.URL) + if err != nil { + return "", err + } + + stringToSign := strings.Join([]string{ + request.Method, + headers.Get(headerContentEncoding), + headers.Get(headerContentLanguage), + contentLength, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + "", // Empty date because x-ms-date is expected (as per web page above) + headers.Get(headerIfModifiedSince), + headers.Get(headerIfMatch), + headers.Get(headerIfNoneMatch), + headers.Get(headerIfUnmodifiedSince), + headers.Get(headerRange), + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + return stringToSign, nil +} + +func buildCanonicalizedHeader(headers http.Header) string { + cm := map[string][]string{} + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = v // NOTE: the value must not have any whitespace around it. + } + } + if len(cm) == 0 { + return "" + } + + keys := make([]string, 0, len(cm)) + for key := range cm { + keys = append(keys, key) + } + sort.Strings(keys) + ch := bytes.NewBufferString("") + for i, key := range keys { + if i > 0 { + ch.WriteRune('\n') + } + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(strings.Join(cm[key], ",")) + } + return ch.String() +} + +func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + cr := bytes.NewBufferString("/") + cr.WriteString(f.accountName) + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. 
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } else { + // a slash is required to indicate the root path + cr.WriteString("/") + } + + // params is a map[string][]string; param name is key; params values is []string + params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values + if err != nil { + return "", errors.New("parsing query parameters must succeed, otherwise there might be serious problems in the SDK/generated code") + } + + if len(params) > 0 { // There is at least 1 query parameter + paramNames := []string{} // We use this to sort the parameter key names + for paramName := range params { + paramNames = append(paramNames, paramName) // paramNames must be lowercase + } + sort.Strings(paramNames) + + for _, paramName := range paramNames { + paramValues := params[paramName] + sort.Strings(paramValues) + + // Join the sorted key values separated by ',' + // Then prepend "keyName:"; then add this string to the buffer + cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) + } + } + return cr.String(), nil +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go new file mode 100644 index 000000000..19d8ea418 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go @@ -0,0 +1,146 @@ +package azblob + +import ( + "context" + "errors" + "sync/atomic" + + "runtime" + "sync" + "time" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// TokenRefresher represents a callback method that you write; this method is called periodically +// so you can refresh the token credential's value. +type TokenRefresher func(credential TokenCredential) time.Duration + +// TokenCredential represents a token credential (which is also a pipeline.Factory). +type TokenCredential interface { + Credential + Token() string + SetToken(newToken string) +} + +func tokenCredentialPointers(credential TokenCredential) *string { + if credential == nil { + return nil + } + + out := "Bearer " + credential.Token() + return &out +} + +// NewTokenCredential creates a token credential for use with role-based access control (RBAC) access to Azure Storage +// resources. You initialize the TokenCredential with an initial token value. If you pass a non-nil value for +// tokenRefresher, then the function you pass will be called immediately so it can refresh and change the +// TokenCredential's token value by calling SetToken. Your tokenRefresher function must return a time.Duration +// indicating how long the TokenCredential object should wait before calling your tokenRefresher function again. +// If your tokenRefresher callback fails to refresh the token, you can return a duration of 0 to stop your +// TokenCredential object from ever invoking tokenRefresher again. Also, one way to deal with failing to refresh a +// token is to cancel a context.Context object used by requests that have the TokenCredential object in their pipeline. 
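+//
+// Example (a sketch; fetchFreshToken is a hypothetical helper, not part of
+// this package): a refresher that installs a new token and asks to be called
+// again shortly before the token is expected to expire.
+//
+//	tc := NewTokenCredential(initialToken, func(tc TokenCredential) time.Duration {
+//		tc.SetToken(fetchFreshToken()) // hypothetical helper returning a new token string
+//		return 25 * time.Minute        // invoke this refresher again in ~25 minutes
+//	})
+//	p := NewPipeline(tc, PipelineOptions{})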
+func NewTokenCredential(initialToken string, tokenRefresher TokenRefresher) TokenCredential {
+	tc := &tokenCredential{}
+	tc.SetToken(initialToken) // We don't set it above to guarantee atomicity
+	if tokenRefresher == nil {
+		return tc // If no callback specified, return the simple tokenCredential
+	}
+
+	tcwr := &tokenCredentialWithRefresh{token: tc}
+	tcwr.token.startRefresh(tokenRefresher)
+	runtime.SetFinalizer(tcwr, func(deadTC *tokenCredentialWithRefresh) {
+		deadTC.token.stopRefresh()
+		deadTC.token = nil // Sanity (not really required)
+	})
+	return tcwr
+}
+
+// tokenCredentialWithRefresh is a wrapper over a token credential.
+// When this wrapper object gets GC'd, it stops the tokenCredential's timer
+// which allows the tokenCredential object to also be GC'd.
+type tokenCredentialWithRefresh struct {
+	token *tokenCredential
+}
+
+// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+func (*tokenCredentialWithRefresh) credentialMarker() {}
+
+// Token returns the current token value
+func (f *tokenCredentialWithRefresh) Token() string { return f.token.Token() }
+
+// SetToken changes the current token value
+func (f *tokenCredentialWithRefresh) SetToken(token string) { f.token.SetToken(token) }
+
+// New satisfies pipeline.Factory's New method creating a pipeline policy object.
+func (f *tokenCredentialWithRefresh) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+	return f.token.New(next, po)
+}
+
+// /////////////////////////////////////////////////////////////////////////////
+
+// tokenCredential is a pipeline.Factory; it is the credential's policy factory.
+type tokenCredential struct {
+	token atomic.Value
+
+	// The members below are only used if the user specified a tokenRefresher callback function.
+	timer          *time.Timer
+	tokenRefresher TokenRefresher
+	lock           sync.Mutex
+	stopped        bool
+}
+
+// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+func (*tokenCredential) credentialMarker() {}
+
+// Token returns the current token value
+func (f *tokenCredential) Token() string { return f.token.Load().(string) }
+
+// SetToken changes the current token value
+func (f *tokenCredential) SetToken(token string) { f.token.Store(token) }
+
+// startRefresh calls refresh which immediately calls tokenRefresher
+// and then starts a timer to call tokenRefresher in the future.
+func (f *tokenCredential) startRefresh(tokenRefresher TokenRefresher) {
+	f.tokenRefresher = tokenRefresher
+	f.stopped = false // In case user calls StartRefresh, StopRefresh, & then StartRefresh again
+	f.refresh()
+}
+
+// refresh calls the user's tokenRefresher so they can refresh the token (by
+// calling SetToken) and then starts another timer (based on the returned duration)
+// in order to refresh the token again in the future.
+func (f *tokenCredential) refresh() {
+	d := f.tokenRefresher(f) // Invoke the user's refresh callback outside of the lock
+	if d > 0 {               // If duration is 0 or negative, refresher wants to not be called again
+		f.lock.Lock()
+		if !f.stopped {
+			f.timer = time.AfterFunc(d, f.refresh)
+		}
+		f.lock.Unlock()
+	}
+}
+
+// stopRefresh stops any pending timer and sets stopped field to true to prevent
+// any new timer from starting.
+// NOTE: Stopping the timer allows the GC to destroy the tokenCredential object.
+func (f *tokenCredential) stopRefresh() {
+	f.lock.Lock()
+	f.stopped = true
+	if f.timer != nil {
+		f.timer.Stop()
+	}
+	f.lock.Unlock()
+}
+
+// New satisfies pipeline.Factory's New method creating a pipeline policy object.
+func (f *tokenCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+	return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+		if request.URL.Scheme != "https" {
+			// HTTPS must be used, otherwise the tokens are at the risk of being exposed
+			return nil, errors.New("token credentials require a URL using the https protocol scheme")
+		}
+		request.Header[headerAuthorization] = []string{"Bearer " + f.Token()}
+		return next.Do(ctx, request)
+	})
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go
new file mode 100644
index 000000000..ba99255c1
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go
@@ -0,0 +1,45 @@
+package azblob
+
+import (
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// PipelineOptions is used to configure a request policy pipeline's retry policy and logging.
+type PipelineOptions struct {
+	// Log configures the pipeline's logging infrastructure indicating what information is logged and where.
+	Log pipeline.LogOptions
+
+	// Retry configures the built-in retry policy behavior.
+	Retry RetryOptions
+
+	// RequestLog configures the built-in request logging policy.
+	RequestLog RequestLogOptions
+
+	// Telemetry configures the built-in telemetry policy behavior.
+	Telemetry TelemetryOptions
+
+	// HTTPSender configures the sender of HTTP requests
+	HTTPSender pipeline.Factory
+}
+
+// NewPipeline creates a Pipeline using the specified credentials and options.
+func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline {
+	// Closest to API goes first; closest to the wire goes last
+	f := []pipeline.Factory{
+		NewTelemetryPolicyFactory(o.Telemetry),
+		NewUniqueRequestIDPolicyFactory(),
+		NewRetryPolicyFactory(o.Retry),
+	}
+
+	if _, ok := c.(*anonymousCredentialPolicyFactory); !ok {
+		// For AnonymousCredential, we optimize out the policy factory since it doesn't do anything
+		// NOTE: The credential's policy factory must appear close to the wire so it can sign any
+		// changes made by other factories (like UniqueRequestIDPolicyFactory)
+		f = append(f, c)
+	}
+	f = append(f,
+		NewRequestLogPolicyFactory(o.RequestLog),
+		pipeline.MethodFactoryMarker()) // indicates at what stage in the pipeline the method factory is invoked
+
+	return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: o.HTTPSender, Log: o.Log})
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go
new file mode 100644
index 000000000..ddc83cc78
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go
@@ -0,0 +1,194 @@
+package azblob
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"net/http"
+	"net/url"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// RequestLogOptions configures the request logging policy's behavior.
+type RequestLogOptions struct {
+	// LogWarningIfTryOverThreshold logs a warning if a tried operation takes longer than the specified
+	// duration (-1=no logging; 0=default threshold).
+	LogWarningIfTryOverThreshold time.Duration
+
+	// SyslogDisabled is a flag to check if logging to Syslog/Windows-Event-Logger is enabled or not.
+	// By default we print to Syslog/Windows-Event-Logger.
+	// If SyslogDisabled is not provided explicitly, the default value will be false.
+	SyslogDisabled bool
+}
+
+func (o RequestLogOptions) defaults() RequestLogOptions {
+	if o.LogWarningIfTryOverThreshold == 0 {
+		// It would be good to relate this to https://azure.microsoft.com/en-us/support/legal/sla/storage/v1_2/
+		// But this monitors the time to get the HTTP response; NOT the time to download the response body.
+		o.LogWarningIfTryOverThreshold = 3 * time.Second // Default to 3 seconds
+	}
+	return o
+}
+
+// NewRequestLogPolicyFactory creates a RequestLogPolicyFactory object configured using the specified options.
+func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
+	o = o.defaults() // Force defaults to be calculated
+	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+		// These variables are per-policy; shared by multiple calls to Do
+		var try int32
+		operationStart := time.Now() // If this is the 1st try, record the operation start time
+		return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
+			try++ // The first try is #1 (not #0)
+
+			// Log the outgoing request as informational
+			if po.ShouldLog(pipeline.LogInfo) {
+				b := &bytes.Buffer{}
+				fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", try)
+				pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), nil, nil)
+				po.Log(pipeline.LogInfo, b.String())
+			}
+
+			// Set the time for this particular retry operation and then Do the operation.
+			tryStart := time.Now()
+			response, err = next.Do(ctx, request) // Make the request
+			tryEnd := time.Now()
+			tryDuration := tryEnd.Sub(tryStart)
+			opDuration := tryEnd.Sub(operationStart)
+
+			logLevel, forceLog := pipeline.LogInfo, false // Default logging information
+
+			// If the response took too long, we'll upgrade to warning.
+			if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
+				// Log a warning if the try duration exceeded the specified threshold
+				logLevel, forceLog = pipeline.LogWarning, !o.SyslogDisabled
+			}
+
+			var sc int
+			if err == nil { // We got a valid response from the service
+				sc = response.Response().StatusCode
+			} else { // We got an error, so we should inspect if we got a response
+				if se, ok := err.(StorageError); ok {
+					if r := se.Response(); r != nil {
+						sc = r.StatusCode
+					}
+				}
+			}
+
+			if sc == 0 || ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict &&
+				sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) {
+				logLevel, forceLog = pipeline.LogError, !o.SyslogDisabled // Promote to Error any 4xx (except those listed above) or any 5xx
+			} else {
+				// For other status codes, we leave the level as is.
+ } + + if shouldLog := po.ShouldLog(logLevel); forceLog || shouldLog { + // We're going to log this; build the string to log + b := &bytes.Buffer{} + slow := "" + if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold { + slow = fmt.Sprintf("[SLOW >%v]", o.LogWarningIfTryOverThreshold) + } + fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", try, tryDuration, slow, opDuration) + if err != nil { // This HTTP request did not get a response from the service + fmt.Fprint(b, "REQUEST ERROR\n") + } else { + if logLevel == pipeline.LogError { + fmt.Fprint(b, "RESPONSE STATUS CODE ERROR\n") + } else { + fmt.Fprint(b, "RESPONSE SUCCESSFULLY RECEIVED\n") + } + } + + pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), response.Response(), err) + if logLevel <= pipeline.LogError { + b.Write(stack()) // For errors (or lower levels), we append the stack trace (an expensive operation) + } + msg := b.String() + + if forceLog { + pipeline.ForceLog(logLevel, msg) + } + if shouldLog { + po.Log(logLevel, msg) + } + } + return response, err + } + }) +} + +// RedactSigQueryParam redacts the 'sig' query parameter in URL's raw query to protect secret. +func RedactSigQueryParam(rawQuery string) (bool, string) { + rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig= + sigFound := strings.Contains(rawQuery, "?sig=") + if !sigFound { + sigFound = strings.Contains(rawQuery, "&sig=") + if !sigFound { + return sigFound, rawQuery // [?|&]sig= not found; return same rawQuery passed in (no memory allocation) + } + } + // [?|&]sig= found, redact its value + values, _ := url.ParseQuery(rawQuery) + for name := range values { + if strings.EqualFold(name, "sig") { + values[name] = []string{"REDACTED"} + } + } + return sigFound, values.Encode() +} + +func prepareRequestForLogging(request pipeline.Request) *http.Request { + req := request + if sigFound, rawQuery := RedactSigQueryParam(req.URL.RawQuery); sigFound { + // Make copy so we don't destroy the query parameters we actually need to send in the request + req = request.Copy() + req.Request.URL.RawQuery = rawQuery + } + + return prepareRequestForServiceLogging(req) +} + +func stack() []byte { + buf := make([]byte, 1024) + for { + n := runtime.Stack(buf, false) + if n < len(buf) { + return buf[:n] + } + buf = make([]byte, 2*len(buf)) + } +} + +/////////////////////////////////////////////////////////////////////////////////////// +// Redact phase useful for blob and file service only. For other services, +// this method can directly return request.Request. 
+///////////////////////////////////////////////////////////////////////////////////////
+func prepareRequestForServiceLogging(request pipeline.Request) *http.Request {
+	req := request
+	if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsCopySourceHeader); exist {
+		req = request.Copy()
+		url, err := url.Parse(req.Header.Get(key))
+		if err == nil {
+			if sigFound, rawQuery := RedactSigQueryParam(url.RawQuery); sigFound {
+				url.RawQuery = rawQuery
+				req.Header.Set(xMsCopySourceHeader, url.String())
+			}
+		}
+	}
+	return req.Request
+}
+
+const xMsCopySourceHeader = "x-ms-copy-source"
+
+func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, string) {
+	for keyInHeader := range header {
+		if strings.EqualFold(keyInHeader, key) {
+			return true, keyInHeader
+		}
+	}
+	return false, ""
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go
new file mode 100644
index 000000000..6286431a8
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go
@@ -0,0 +1,419 @@
+package azblob
+
+import (
+	"context"
+	"errors"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
+type RetryPolicy int32
+
+const (
+	// RetryPolicyExponential tells the pipeline to use an exponential back-off retry policy
+	RetryPolicyExponential RetryPolicy = 0
+
+	// RetryPolicyFixed tells the pipeline to use a fixed back-off retry policy
+	RetryPolicyFixed RetryPolicy = 1
+)
+
+// RetryOptions configures the retry policy's behavior.
+type RetryOptions struct {
+	// Policy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
+	// A value of zero means that you accept our default policy.
+	Policy RetryPolicy
+
+	// MaxTries specifies the maximum number of attempts an operation will be tried before producing an error (0=default).
+	// A value of zero means that you accept our default policy. A value of 1 means 1 try and no retries.
+	MaxTries int32
+
+	// TryTimeout indicates the maximum time allowed for any single try of an HTTP request.
+	// A value of zero means that you accept our default timeout. NOTE: When transferring large amounts
+	// of data, the default TryTimeout will probably not be sufficient. You should override this value
+	// based on the bandwidth available to the host machine and proximity to the Storage service. A good
+	// starting point may be something like (60 seconds per MB of anticipated-payload-size).
+	TryTimeout time.Duration
+
+	// RetryDelay specifies the amount of delay to use before retrying an operation (0=default).
+	// When RetryPolicy is specified as RetryPolicyExponential, the delay increases exponentially
+	// with each retry up to a maximum specified by MaxRetryDelay.
+	// If you specify 0, then you must also specify 0 for MaxRetryDelay.
+	// If you specify RetryDelay, then you must also specify MaxRetryDelay, and MaxRetryDelay should be
+	// equal to or greater than RetryDelay.
+	RetryDelay time.Duration
+
+	// MaxRetryDelay specifies the maximum delay allowed before retrying an operation (0=default).
+	// If you specify 0, then you must also specify 0 for RetryDelay.
+	MaxRetryDelay time.Duration
+
+	// RetryReadsFromSecondaryHost specifies whether the retry policy should retry a read operation against another host.
+	// If RetryReadsFromSecondaryHost is "" (the default) then operations are not retried against another host.
+	// NOTE: Before setting this field, make sure you understand the issues around reading stale & potentially-inconsistent
+	// data at this webpage: https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs
+	RetryReadsFromSecondaryHost string // Comment this out for non-Blob SDKs
+}
+
+func (o RetryOptions) retryReadsFromSecondaryHost() string {
+	return o.RetryReadsFromSecondaryHost // This is for the Blob SDK only
+	//return "" // This is for non-blob SDKs
+}
+
+func (o RetryOptions) defaults() RetryOptions {
+	// We assume the following:
+	// 1. o.Policy should either be RetryPolicyExponential or RetryPolicyFixed
+	// 2. o.MaxTries >= 0
+	// 3. o.TryTimeout, o.RetryDelay, and o.MaxRetryDelay >=0
+	// 4. o.RetryDelay <= o.MaxRetryDelay
+	// 5. Both o.RetryDelay and o.MaxRetryDelay must be 0 or neither can be 0
+
+	IfDefault := func(current *time.Duration, desired time.Duration) {
+		if *current == time.Duration(0) {
+			*current = desired
+		}
+	}
+
+	// Set defaults if unspecified
+	if o.MaxTries == 0 {
+		o.MaxTries = 4
+	}
+	switch o.Policy {
+	case RetryPolicyExponential:
+		IfDefault(&o.TryTimeout, 1*time.Minute)
+		IfDefault(&o.RetryDelay, 4*time.Second)
+		IfDefault(&o.MaxRetryDelay, 120*time.Second)
+
+	case RetryPolicyFixed:
+		IfDefault(&o.TryTimeout, 1*time.Minute)
+		IfDefault(&o.RetryDelay, 30*time.Second)
+		IfDefault(&o.MaxRetryDelay, 120*time.Second)
+	}
+	return o
+}
+
+func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never 0
+	pow := func(number int64, exponent int32) int64 { // pow is nested helper function
+		var result int64 = 1
+		for n := int32(0); n < exponent; n++ {
+			result *= number
+		}
+		return result
+	}
+
+	delay := time.Duration(0)
+	switch o.Policy {
+	case RetryPolicyExponential:
+		delay = time.Duration(pow(2, try-1)-1) * o.RetryDelay
+
+	case RetryPolicyFixed:
+		if try > 1 { // Any try after the 1st uses the fixed delay
+			delay = o.RetryDelay
+		}
+	}
+
+	// Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
+	// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
+	delay = time.Duration(float32(delay) * (rand.Float32()/2 + 0.8)) // NOTE: We want math/rand; not crypto/rand
+	if delay > o.MaxRetryDelay {
+		delay = o.MaxRetryDelay
+	}
+	return delay
+}
+
+// NewRetryPolicyFactory creates a RetryPolicyFactory object configured using the specified options.
+func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
+	o = o.defaults() // Force defaults to be calculated
+	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+		return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
+			// Before each try, we'll select either the primary or secondary URL.
+			primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC
+
+			// We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use
+			considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.retryReadsFromSecondaryHost() != ""
+
+			// Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2)
+			// When to retry: connection failure or temporary/timeout. NOTE: StorageError considers HTTP 500/503 as temporary & is therefore retryable
+			// If using a secondary:
+			//    Odd tries go against the primary; even tries go against the secondary
+			//    For a primary, wait ((2 ^ primaryTries) - 1) * delay * random(0.8, 1.2)
+			//    If secondary gets a 404, don't fail, retry but future retries are only against the primary
+			//    When retrying against a secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2))
+			for try := int32(1); try <= o.MaxTries; try++ {
+				logf("\n=====> Try=%d\n", try)
+
+				// Determine which endpoint to try. It's primary if there is no secondary or if it is an odd # attempt.
+				tryingPrimary := !considerSecondary || (try%2 == 1)
+				// Select the correct host and delay
+				if tryingPrimary {
+					primaryTry++
+					delay := o.calcDelay(primaryTry)
+					logf("Primary try=%d, Delay=%v\n", primaryTry, delay)
+					time.Sleep(delay) // The 1st try returns 0 delay
+				} else {
+					// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
+					delay := time.Duration(float32(time.Second) * (rand.Float32()/2 + 0.8))
+					logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay)
+					time.Sleep(delay) // Delay with some jitter before trying secondary
+				}
+
+				// Clone the original request to ensure that each try starts with the original (unmutated) request.
+				requestCopy := request.Copy()
+
+				// For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
+				// the stream may not be at offset 0 when we first get it and we want the same behavior for the
+				// 1st try as for additional tries.
+				err = requestCopy.RewindBody()
+				if err != nil {
+					return nil, errors.New("we must be able to seek on the Body Stream, otherwise retries would cause data corruption")
+				}
+
+				if !tryingPrimary {
+					requestCopy.URL.Host = o.retryReadsFromSecondaryHost()
+					requestCopy.Host = o.retryReadsFromSecondaryHost()
+				}
+
+				// Set the server-side timeout query parameter "timeout=[seconds]"
+				timeout := o.TryTimeout // Max time per try
+				if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two
+					t := deadline.Sub(time.Now()) // Duration from now until user's ctx reaches its deadline
+					logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", int32(timeout.Seconds()), int32(t.Seconds()))
+					if t < timeout {
+						timeout = t
+					}
+					if timeout < 0 {
+						timeout = 0 // If timeout ever goes negative, set it to zero; this can happen while debugging
+					}
+					logf("TryTimeout adjusted to=%d sec\n", int32(timeout.Seconds()))
+				}
+				q := requestCopy.Request.URL.Query()
+				q.Set("timeout", strconv.Itoa(int(timeout.Seconds()+1))) // Add 1 to "round up"
+				requestCopy.Request.URL.RawQuery = q.Encode()
+				logf("Url=%s\n", requestCopy.Request.URL.String())
+
+				// Set the time for this particular retry operation and then Do the operation.
+				tryCtx, tryCancel := context.WithTimeout(ctx, timeout)
+				//requestCopy.Body = &deadlineExceededReadCloser{r: requestCopy.Request.Body}
+				response, err = next.Do(tryCtx, requestCopy) // Make the request
+				/*err = improveDeadlineExceeded(err)
+				if err == nil {
+					response.Response().Body = &deadlineExceededReadCloser{r: response.Response().Body}
+				}*/
+				logf("Err=%v, response=%v\n", err, response)
+
+				action := "" // This MUST get changed within the switch code below
+				switch {
+				case ctx.Err() != nil:
+					action = "NoRetry: Op timeout"
+				case !tryingPrimary && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusNotFound:
+					// If attempt was against the secondary & it returned a StatusNotFound (404), then
+					// the resource was not found. This may be due to replication delay. So, in this
+					// case, we'll never try the secondary again for this operation.
+					considerSecondary = false
+					action = "Retry: Secondary URL returned 404"
+				case err != nil:
+					// NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation.
+					// Use ServiceCode to verify if the error is related to the storage service side;
+					// ServiceCode is set only when an error related to the storage service happened.
+					if stErr, ok := err.(StorageError); ok {
+						if stErr.Temporary() {
+							action = "Retry: StorageError with error service code and Temporary()"
+						} else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // TODO: This is a temporary workaround; remove this after the protocol layer fixes the issue where a net.Error is wrapped as a StorageError
+							action = "Retry: StorageError with success status code"
+						} else {
+							action = "NoRetry: StorageError not Temporary() and without retriable status code"
+						}
+					} else if netErr, ok := err.(net.Error); ok {
+						// Use the non-retriable net.Error list rather than a retriable list.
+						// There are errors without a Temporary() implementation that still need to be
+						// retried, like 'connection reset by peer' and 'transport connection broken'.
+						// So the SDK retries in most cases, unless the error definitely should not be retried.
+						if !isNotRetriable(netErr) {
+							action = "Retry: net.Error and not in the non-retriable list"
+						} else {
+							action = "NoRetry: net.Error and in the non-retriable list"
+						}
+					} else if err == io.ErrUnexpectedEOF {
+						action = "Retry: unexpected EOF"
+					} else {
+						action = "NoRetry: unrecognized error"
+					}
+				default:
+					action = "NoRetry: successful HTTP request" // no error
+				}
+
+				logf("Action=%s\n", action)
+				// fmt.Println(action + "\n") // This is where we could log the retry operation; action is why we're retrying
+				if action[0] != 'R' { // Retry only if action starts with 'R'
+					if err != nil {
+						tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
+					} else {
+						// We wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper.
+						// So, when the user closes the Body, then our per-try context gets closed too.
+						// Another option is that the last Policy does this wrapping for a per-retry context (not for the user's context)
+						if response == nil || response.Response() == nil {
+							// We return an error in the case response or response.Response() is nil,
+							// as for the client, the response should not be nil if the request is sent and the operation is executed successfully.
+							// Another option is to execute the cancel function when response or response.Response() is nil,
+							// as in that case the current per-try has nothing more to do.
+							return nil, errors.New("invalid state, response should not be nil when the operation is executed successfully")
+						}
+						if response.Response().Body == http.NoBody {
+							// If the response is empty the caller isn't obligated to call close
+							tryCancel()
+						} else {
+							response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body}
+						}
+					}
+					break // Don't retry
+				}
+				if response != nil && response.Response() != nil && response.Response().Body != nil {
+					// If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection
+					body := response.Response().Body
+					io.Copy(ioutil.Discard, body)
+					body.Close()
+				}
+				// If retrying, cancel the current per-try timeout context
+				tryCancel()
+			}
+			return response, err // Not retryable or too many retries; return the last response/error
+		}
+	})
+}
+
+// contextCancelReadCloser helps to invoke context's cancelFunc properly when the ReadCloser is closed.
+type contextCancelReadCloser struct {
+	cf   context.CancelFunc
+	body io.ReadCloser
+}
+
+func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) {
+	return rc.body.Read(p)
+}
+
+func (rc *contextCancelReadCloser) Close() error {
+	err := rc.body.Close()
+	if rc.cf != nil {
+		rc.cf()
+	}
+	return err
+}
+
+// isNotRetriable checks if the provided net.Error isn't retriable.
+func isNotRetriable(errToParse net.Error) bool {
+	// No error, so this is NOT retriable.
+	if errToParse == nil {
+		return true
+	}
+
+	// The error is either temporary or a timeout so it IS retriable (not not retriable).
+	if errToParse.Temporary() || errToParse.Timeout() {
+		return false
+	}
+
+	genericErr := error(errToParse)
+
+	// From here on, all the errors are neither Temporary() nor Timeout().
+	switch err := errToParse.(type) {
+	case *net.OpError:
+		// The net.Error is also a net.OpError but the inner error is nil, so this is not retriable.
+		if err.Err == nil {
+			return true
+		}
+		genericErr = err.Err
+	}
+
+	switch genericErr.(type) {
+	case *net.AddrError, net.UnknownNetworkError, *net.DNSError, net.InvalidAddrError, *net.ParseError, *net.DNSConfigError:
+		// If the error is one of the ones listed, then it is NOT retriable.
+		return true
+	}
+
+	// If it's an invalid header field name/value error thrown by the http module, then it is NOT retriable.
+	// This could happen when metadata's key or value is invalid. (RoundTrip in transport.go)
+	if strings.Contains(genericErr.Error(), "invalid header field") {
+		return true
+	}
+
+	// Assume the error is retriable.
+ return false +} + +var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent} + +func isSuccessStatusCode(resp *http.Response) bool { + if resp == nil { + return false + } + for _, i := range successStatusCodes { + if i == resp.StatusCode { + return true + } + } + return false +} + +// According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away +var logf = func(format string, a ...interface{}) {} + +// Use this version to see the retry method's code path (import "fmt") +//var logf = fmt.Printf + +/* +type deadlineExceededReadCloser struct { + r io.ReadCloser +} + +func (r *deadlineExceededReadCloser) Read(p []byte) (int, error) { + n, err := 0, io.EOF + if r.r != nil { + n, err = r.r.Read(p) + } + return n, improveDeadlineExceeded(err) +} +func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) { + // For an HTTP request, the ReadCloser MUST also implement seek + // For an HTTP response, Seek MUST not be called (or this will panic) + o, err := r.r.(io.Seeker).Seek(offset, whence) + return o, improveDeadlineExceeded(err) +} +func (r *deadlineExceededReadCloser) Close() error { + if c, ok := r.r.(io.Closer); ok { + c.Close() + } + return nil +} + +// timeoutError is the internal struct that implements our richer timeout error. +type deadlineExceeded struct { + responseError +} + +var _ net.Error = (*deadlineExceeded)(nil) // Ensure deadlineExceeded implements the net.Error interface at compile time + +// improveDeadlineExceeded creates a timeoutError object that implements the error interface IF cause is a context.DeadlineExceeded error. +func improveDeadlineExceeded(cause error) error { + // If cause is not DeadlineExceeded, return the same error passed in. + if cause != context.DeadlineExceeded { + return cause + } + // Else, convert DeadlineExceeded to our timeoutError which gives a richer string message + return &deadlineExceeded{ + responseError: responseError{ + ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), + }, + } +} + +// Error implements the error interface's Error method to return a string representation of the error. +func (e *deadlineExceeded) Error() string { + return e.ErrorNode.Error("context deadline exceeded; when creating a pipeline, consider increasing RetryOptions' TryTimeout field") +} +*/ diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go new file mode 100644 index 000000000..608e1051c --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go @@ -0,0 +1,51 @@ +package azblob + +import ( + "bytes" + "context" + "fmt" + "os" + "runtime" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// TelemetryOptions configures the telemetry policy's behavior. +type TelemetryOptions struct { + // Value is a string prepended to each request's User-Agent and sent to the service. + // The service records the user-agent in logs for diagnostics and tracking of client requests. + Value string +} + +// NewTelemetryPolicyFactory creates a factory that can create telemetry policy objects +// which add telemetry information to outgoing HTTP requests. 
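+//
+// For example (a sketch; "myapp/1.0" is a hypothetical application identifier),
+// a pipeline built with
+//
+//	p := NewPipeline(NewAnonymousCredential(), PipelineOptions{
+//		Telemetry: TelemetryOptions{Value: "myapp/1.0"},
+//	})
+//
+// sends a User-Agent along the lines of "myapp/1.0 Azure-Storage/0.15 (go1.19.5; linux)".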
+func NewTelemetryPolicyFactory(o TelemetryOptions) pipeline.Factory { + b := &bytes.Buffer{} + b.WriteString(o.Value) + if b.Len() > 0 { + b.WriteRune(' ') + } + fmt.Fprintf(b, "Azure-Storage/%s %s", serviceLibVersion, platformInfo) + telemetryValue := b.String() + + return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { + return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + request.Header.Set("User-Agent", telemetryValue) + return next.Do(ctx, request) + } + }) +} + +// NOTE: the ONLY function that should write to this variable is this func +var platformInfo = func() string { + // Azure-Storage/version (runtime; os type and version)” + // Azure-Storage/1.4.0 (NODE-VERSION v4.5.0; Windows_NT 10.0.14393)' + operatingSystem := runtime.GOOS // Default OS string + switch operatingSystem { + case "windows": + operatingSystem = os.Getenv("OS") // Get more specific OS information + case "linux": // accept default OS info + case "freebsd": // accept default OS info + } + return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) +}() diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go new file mode 100644 index 000000000..1f7817d2d --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go @@ -0,0 +1,36 @@ +package azblob + +import ( + "context" + "errors" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// NewUniqueRequestIDPolicyFactory creates a UniqueRequestIDPolicyFactory object +// that sets the request's x-ms-client-request-id header if it doesn't already exist. +func NewUniqueRequestIDPolicyFactory() pipeline.Factory { + return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { + // This is Policy's Do method: + return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + id := request.Header.Get(xMsClientRequestID) + if id == "" { // Add a unique request ID if the caller didn't specify one already + id = newUUID().String() + request.Header.Set(xMsClientRequestID, id) + } + + resp, err := next.Do(ctx, request) + + if err == nil && resp != nil { + crId := resp.Response().Header.Get(xMsClientRequestID) + if crId != "" && crId != id { + err = errors.New("client Request ID from request and response does not match") + } + } + + return resp, err + } + }) +} + +const xMsClientRequestID = "x-ms-client-request-id" diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go new file mode 100644 index 000000000..ad38f597e --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go @@ -0,0 +1,186 @@ +package azblob + +import ( + "context" + "io" + "net" + "net/http" + "strings" + "sync" +) + +const CountToEnd = 0 + +// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation. +type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error) + +// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters +// that should be used to make an HTTP GET request. 
+type HTTPGetterInfo struct {
+	// Offset specifies the start offset that should be used when
+	// creating the HTTP GET request's Range header
+	Offset int64
+
+	// Count specifies the count of bytes that should be used to calculate
+	// the end offset when creating the HTTP GET request's Range header
+	Count int64
+
+	// ETag specifies the resource's etag that should be used when creating
+	// the HTTP GET request's If-Match header
+	ETag ETag
+}
+
+// FailedReadNotifier is a function type that represents the notification function called when a read fails
+type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool)
+
+// RetryReaderOptions contains properties which can help to decide when to do retry.
+type RetryReaderOptions struct {
+	// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
+	// while reading from a RetryReader. A value of zero means that no additional HTTP
+	// GET requests will be made.
+	MaxRetryRequests   int
+	doInjectError      bool
+	doInjectErrorRound int
+	injectedError      error
+
+	// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
+	NotifyFailedRead FailedReadNotifier
+
+	// TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
+	// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
+	// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the
+	// read is too slow, the caller may want to force a retry in the hope that the retry will be quicker). If
+	// TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead
+	// treated as a fatal (non-retryable) error.
+	// Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
+	// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
+	// which will be retried.
+	TreatEarlyCloseAsError bool
+
+	ClientProvidedKeyOptions ClientProvidedKeyOptions
+}
+
+// retryReader implements io.ReadCloser methods.
+// retryReader tries to read from a response, and if a retriable network error is
+// returned during reading, it retries, according to the retry reader options, by executing
+// a user-defined action with the provided data to get a new response, and continues the
+// overall reading process by reading from the new response.
+type retryReader struct {
+	ctx             context.Context
+	info            HTTPGetterInfo
+	countWasBounded bool
+	o               RetryReaderOptions
+	getter          HTTPGetter
+
+	// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
+	responseMu *sync.Mutex
+	response   *http.Response
+}
+
+// NewRetryReader creates a retry reader.
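+//
+// Illustrative sketch (blobURL, ctx and initialResponse are assumed to exist,
+// and this assumes the BlobURL.Download signature of this package version):
+// wrapping a download so that transient network failures while reading the body
+// trigger up to three fresh GETs from the current offset.
+//
+//	getter := func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error) {
+//		resp, err := blobURL.Download(ctx, i.Offset, i.Count, BlobAccessConditions{}, false, ClientProvidedKeyOptions{})
+//		if err != nil {
+//			return nil, err
+//		}
+//		return resp.Response(), nil
+//	}
+//	rr := NewRetryReader(ctx, initialResponse, HTTPGetterInfo{Offset: 0, Count: CountToEnd},
+//		RetryReaderOptions{MaxRetryRequests: 3}, getter)
+//	defer rr.Close()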
+func NewRetryReader(ctx context.Context, initialResponse *http.Response,
+	info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
+	return &retryReader{
+		ctx:             ctx,
+		getter:          getter,
+		info:            info,
+		countWasBounded: info.Count != CountToEnd,
+		response:        initialResponse,
+		responseMu:      &sync.Mutex{},
+		o:               o}
+}
+
+func (s *retryReader) setResponse(r *http.Response) {
+	s.responseMu.Lock()
+	defer s.responseMu.Unlock()
+	s.response = r
+}
+
+func (s *retryReader) Read(p []byte) (n int, err error) {
+	for try := 0; ; try++ {
+		//fmt.Println(try)       // Uncomment for debugging.
+		if s.countWasBounded && s.info.Count == CountToEnd {
+			// User specified an original count and the remaining bytes are 0, return 0, EOF
+			return 0, io.EOF
+		}
+
+		s.responseMu.Lock()
+		resp := s.response
+		s.responseMu.Unlock()
+		if resp == nil { // We don't have a response stream to read from, try to get one.
+			newResponse, err := s.getter(s.ctx, s.info)
+			if err != nil {
+				return 0, err
+			}
+			// Successful GET; this is the network stream we'll read from.
+			s.setResponse(newResponse)
+			resp = newResponse
+		}
+		n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running)
+
+		// Injection mechanism for testing.
+		if s.o.doInjectError && try == s.o.doInjectErrorRound {
+			if s.o.injectedError != nil {
+				err = s.o.injectedError
+			} else {
+				err = &net.DNSError{IsTemporary: true}
+			}
+		}
+
+		// We successfully read data or reached EOF.
+		if err == nil || err == io.EOF {
+			s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future
+			if s.info.Count != CountToEnd {
+				s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
+			}
+			return n, err // Return the result to the caller
+		}
+		s.Close()          // Error, close stream
+		s.setResponse(nil) // Our stream is no longer good
+
+		// Check the retry count and error code, and decide whether to retry.
+		retriesExhausted := try >= s.o.MaxRetryRequests
+		_, isNetError := err.(net.Error)
+		isUnexpectedEOF := err == io.ErrUnexpectedEOF
+		willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted
+
+		// Notify, for logging purposes, of any failures
+		if s.o.NotifyFailedRead != nil {
+			failureCount := try + 1 // because try is zero-based
+			s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry)
+		}
+
+		if willRetry {
+			continue
+			// Loop around and try to get and read from new stream.
+		}
+		return n, err // Not retryable, or retries exhausted, so just return
+	}
+}
+
+// By default, we allow an early Close, from another concurrent goroutine, to be used to force a retry.
+// Is this safe, to close early from another goroutine? Early close ultimately ends up calling
+// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors"
+// which is exactly the behaviour we want.
+// NOTE: that if the caller has forced an early Close from a separate goroutine (separate from the Read)
+// then there are two different types of error that may happen - either the one we check for here,
+// or a net.Error (due to closure of the connection). Which one happens depends on timing. We only need this routine
+// to check for one, since the other is a net.Error, which our main Read retry loop is already handling.
+func (s *retryReader) wasRetryableEarlyClose(err error) bool {
+	if s.o.TreatEarlyCloseAsError {
+		return false // user wants all early closes to be errors, and so not retryable
+	}
+	// unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text
+	return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage)
+}
+
+const ReadOnClosedBodyMessage = "read on closed response body"
+
+func (s *retryReader) Close() error {
+	s.responseMu.Lock()
+	defer s.responseMu.Unlock()
+	if s.response != nil && s.response.Body != nil {
+		return s.response.Body.Close()
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go
new file mode 100644
index 000000000..6b84d95e3
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go
@@ -0,0 +1,244 @@
+package azblob
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
+type AccountSASSignatureValues struct {
+	Version       string      `param:"sv"`  // If not specified, this defaults to SASVersion
+	Protocol      SASProtocol `param:"spr"` // See the SASProtocol* constants
+	StartTime     time.Time   `param:"st"`  // Not specified if IsZero
+	ExpiryTime    time.Time   `param:"se"`  // Not specified if IsZero
+	Permissions   string      `param:"sp"`  // Create by initializing an AccountSASPermissions and then call String()
+	IPRange       IPRange     `param:"sip"`
+	Services      string      `param:"ss"`  // Create by initializing AccountSASServices and then call String()
+	ResourceTypes string      `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String()
+}
+
+// NewSASQueryParameters uses an account's shared key credential to sign these signature values to produce
+// the proper SAS query parameters.
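+//
+// Illustrative sketch (credential is an assumed *SharedKeyCredential):
+// producing an account-level SAS good for one hour of read/list access
+// to blob containers and objects.
+//
+//	values := AccountSASSignatureValues{
+//		Protocol:      SASProtocolHTTPS,
+//		ExpiryTime:    time.Now().UTC().Add(1 * time.Hour),
+//		Permissions:   AccountSASPermissions{Read: true, List: true}.String(),
+//		Services:      AccountSASServices{Blob: true}.String(),
+//		ResourceTypes: AccountSASResourceTypes{Container: true, Object: true}.String(),
+//	}
+//	qp, err := values.NewSASQueryParameters(credential)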
+func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
+	// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
+	if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
+		return SASQueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
+	}
+	if v.Version == "" {
+		v.Version = SASVersion
+	}
+	perms := &AccountSASPermissions{}
+	if err := perms.Parse(v.Permissions); err != nil {
+		return SASQueryParameters{}, err
+	}
+	v.Permissions = perms.String()
+
+	startTime, expiryTime, _ := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, time.Time{})
+
+	stringToSign := strings.Join([]string{
+		sharedKeyCredential.AccountName(),
+		v.Permissions,
+		v.Services,
+		v.ResourceTypes,
+		startTime,
+		expiryTime,
+		v.IPRange.String(),
+		string(v.Protocol),
+		v.Version,
+		""}, // That's right, the account SAS requires a terminating extra newline
+		"\n")
+
+	signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
+	p := SASQueryParameters{
+		// Common SAS parameters
+		version:     v.Version,
+		protocol:    v.Protocol,
+		startTime:   v.StartTime,
+		expiryTime:  v.ExpiryTime,
+		permissions: v.Permissions,
+		ipRange:     v.IPRange,
+
+		// Account-specific SAS parameters
+		services:      v.Services,
+		resourceTypes: v.ResourceTypes,
+
+		// Calculated SAS signature
+		signature: signature,
+	}
+
+	return p, nil
+}
+
+// The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
+type AccountSASPermissions struct {
+	Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags, PermanentDelete, Immutability bool
+}
+
+// String produces the SAS permissions string for an Azure Storage account.
+// Call this method to set AccountSASSignatureValues's Permissions field.
+func (p AccountSASPermissions) String() string {
+	var buffer bytes.Buffer
+	if p.Read {
+		buffer.WriteRune('r')
+	}
+	if p.Write {
+		buffer.WriteRune('w')
+	}
+	if p.Delete {
+		buffer.WriteRune('d')
+	}
+	if p.DeletePreviousVersion {
+		buffer.WriteRune('x')
+	}
+	if p.List {
+		buffer.WriteRune('l')
+	}
+	if p.Add {
+		buffer.WriteRune('a')
+	}
+	if p.Create {
+		buffer.WriteRune('c')
+	}
+	if p.Update {
+		buffer.WriteRune('u')
+	}
+	if p.Process {
+		buffer.WriteRune('p')
+	}
+	if p.Tag {
+		buffer.WriteRune('t')
+	}
+	if p.FilterByTags {
+		buffer.WriteRune('f')
+	}
+	if p.PermanentDelete {
+		buffer.WriteRune('y')
+	}
+	if p.Immutability {
+		buffer.WriteRune('i')
+	}
+	return buffer.String()
+}
+
+// Parse initializes the AccountSASPermissions's fields from a string.
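+//
+// For example (sketch): Parse("rwl") sets Read, Write and List and leaves the
+// other flags false; an unrecognized character such as 'z' yields an error.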
+func (p *AccountSASPermissions) Parse(s string) error {
+	*p = AccountSASPermissions{} // Clear out the flags
+	for _, r := range s {
+		switch r {
+		case 'r':
+			p.Read = true
+		case 'w':
+			p.Write = true
+		case 'd':
+			p.Delete = true
+		case 'l':
+			p.List = true
+		case 'a':
+			p.Add = true
+		case 'c':
+			p.Create = true
+		case 'u':
+			p.Update = true
+		case 'p':
+			p.Process = true
+		case 'x':
+			p.DeletePreviousVersion = true // matches the 'x' written by String()
+		case 't':
+			p.Tag = true
+		case 'f':
+			p.FilterByTags = true
+		case 'y':
+			p.PermanentDelete = true
+		case 'i':
+			p.Immutability = true
+		default:
+			return fmt.Errorf("invalid permission character: '%v'", r)
+		}
+	}
+	return nil
+}
+
+// The AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field.
+type AccountSASServices struct {
+	Blob, Queue, File bool
+}
+
+// String produces the SAS services string for an Azure Storage account.
+// Call this method to set AccountSASSignatureValues's Services field.
+func (s AccountSASServices) String() string {
+	var buffer bytes.Buffer
+	if s.Blob {
+		buffer.WriteRune('b')
+	}
+	if s.Queue {
+		buffer.WriteRune('q')
+	}
+	if s.File {
+		buffer.WriteRune('f')
+	}
+	return buffer.String()
+}
+
+// Parse initializes the AccountSASServices' fields from a string.
+func (a *AccountSASServices) Parse(s string) error {
+	*a = AccountSASServices{} // Clear out the flags
+	for _, r := range s {
+		switch r {
+		case 'b':
+			a.Blob = true
+		case 'q':
+			a.Queue = true
+		case 'f':
+			a.File = true
+		default:
+			return fmt.Errorf("invalid service character: '%v'", r)
+		}
+	}
+	return nil
+}
+
+// The AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field.
+type AccountSASResourceTypes struct {
+	Service, Container, Object bool
+}
+
+// String produces the SAS resource types string for an Azure Storage account.
+// Call this method to set AccountSASSignatureValues's ResourceTypes field.
+func (rt AccountSASResourceTypes) String() string {
+	var buffer bytes.Buffer
+	if rt.Service {
+		buffer.WriteRune('s')
+	}
+	if rt.Container {
+		buffer.WriteRune('c')
+	}
+	if rt.Object {
+		buffer.WriteRune('o')
+	}
+	return buffer.String()
+}
+
+// Parse initializes the AccountSASResourceTypes' fields from a string.
+func (rt *AccountSASResourceTypes) Parse(s string) error {
+	*rt = AccountSASResourceTypes{} // Clear out the flags
+	for _, r := range s {
+		switch r {
+		case 's':
+			rt.Service = true
+		case 'c':
+			rt.Container = true
+		case 'o':
+			rt.Object = true
+		default:
+			return fmt.Errorf("invalid resource type: '%v'", r)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go
new file mode 100644
index 000000000..bef67624c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go
@@ -0,0 +1,393 @@
+package azblob
+
+import (
+	"errors"
+	"net"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// SASVersion indicates the SAS version.
+const SASVersion = ServiceVersion
+
+type SASProtocol string
+
+const (
+	// SASProtocolHTTPS can be specified for a SAS protocol
+	SASProtocolHTTPS SASProtocol = "https"
+
+	// SASProtocolHTTPSandHTTP can be specified for a SAS protocol
+	SASProtocolHTTPSandHTTP SASProtocol = "https,http"
+)
+
+// FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a
+// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero().
+func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) {
+	ss := ""
+	if !startTime.IsZero() {
+		ss = formatSASTimeWithDefaultFormat(&startTime)
+	}
+	se := ""
+	if !expiryTime.IsZero() {
+		se = formatSASTimeWithDefaultFormat(&expiryTime)
+	}
+	sh := ""
+	if !snapshotTime.IsZero() {
+		sh = snapshotTime.Format(SnapshotTimeFormat)
+	}
+	return ss, se, sh
+}
+
+// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
+const SASTimeFormat = "2006-01-02T15:04:05Z" // e.g. "2017-07-27T00:00:00Z" (ISO 8601)
+var SASTimeFormats = []string{"2006-01-02T15:04:05.0000000Z", SASTimeFormat, "2006-01-02T15:04Z", "2006-01-02"} // ISO 8601 formats; please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details.
+
+// formatSASTimeWithDefaultFormat formats time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ".
+func formatSASTimeWithDefaultFormat(t *time.Time) string {
+	return formatSASTime(t, SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// formatSASTime formats time with the given format, using ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default.
+func formatSASTime(t *time.Time, format string) string {
+	if format != "" {
+		return t.Format(format)
+	}
+	return t.Format(SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// parseSASTimeString tries to parse a SAS time string against each supported ISO 8601 format.
+func parseSASTimeString(val string) (t time.Time, timeFormat string, err error) {
+	for _, sasTimeFormat := range SASTimeFormats {
+		t, err = time.Parse(sasTimeFormat, val)
+		if err == nil {
+			timeFormat = sasTimeFormat
+			break
+		}
+	}
+
+	if err != nil {
+		err = errors.New("failed to parse time with ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
+	}
+
+	return
+}
+
+// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+
+// A SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
+// You parse a map of query parameters into its fields by calling newSASQueryParameters(). You add the components
+// to a query parameter map by calling addToValues().
+// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type.
+//
+// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
+type SASQueryParameters struct {
+	// All members are immutable or values so copies of this struct are goroutine-safe.
+ version string `param:"sv"` + services string `param:"ss"` + resourceTypes string `param:"srt"` + protocol SASProtocol `param:"spr"` + startTime time.Time `param:"st"` + expiryTime time.Time `param:"se"` + snapshotTime time.Time `param:"snapshot"` + ipRange IPRange `param:"sip"` + identifier string `param:"si"` + resource string `param:"sr"` + permissions string `param:"sp"` + signature string `param:"sig"` + cacheControl string `param:"rscc"` + contentDisposition string `param:"rscd"` + contentEncoding string `param:"rsce"` + contentLanguage string `param:"rscl"` + contentType string `param:"rsct"` + signedOid string `param:"skoid"` + signedTid string `param:"sktid"` + signedStart time.Time `param:"skt"` + signedService string `param:"sks"` + signedExpiry time.Time `param:"ske"` + signedVersion string `param:"skv"` + signedDirectoryDepth string `param:"sdd"` + preauthorizedAgentObjectId string `param:"saoid"` + agentObjectId string `param:"suoid"` + correlationId string `param:"scid"` + // private member used for startTime and expiryTime formatting. + stTimeFormat string + seTimeFormat string +} + +func (p *SASQueryParameters) PreauthorizedAgentObjectId() string { + return p.preauthorizedAgentObjectId +} + +func (p *SASQueryParameters) AgentObjectId() string { + return p.agentObjectId +} + +func (p *SASQueryParameters) SignedCorrelationId() string { + return p.correlationId +} + +func (p *SASQueryParameters) SignedTid() string { + return p.signedTid +} + +func (p *SASQueryParameters) SignedStart() time.Time { + return p.signedStart +} + +func (p *SASQueryParameters) SignedExpiry() time.Time { + return p.signedExpiry +} + +func (p *SASQueryParameters) SignedService() string { + return p.signedService +} + +func (p *SASQueryParameters) SignedVersion() string { + return p.signedVersion +} + +func (p *SASQueryParameters) SnapshotTime() time.Time { + return p.snapshotTime +} + +func (p *SASQueryParameters) Version() string { + return p.version +} + +func (p *SASQueryParameters) Services() string { + return p.services +} +func (p *SASQueryParameters) ResourceTypes() string { + return p.resourceTypes +} +func (p *SASQueryParameters) Protocol() SASProtocol { + return p.protocol +} +func (p *SASQueryParameters) StartTime() time.Time { + return p.startTime +} +func (p *SASQueryParameters) ExpiryTime() time.Time { + return p.expiryTime +} + +func (p *SASQueryParameters) IPRange() IPRange { + return p.ipRange +} + +func (p *SASQueryParameters) Identifier() string { + return p.identifier +} + +func (p *SASQueryParameters) Resource() string { + return p.resource +} +func (p *SASQueryParameters) Permissions() string { + return p.permissions +} + +func (p *SASQueryParameters) Signature() string { + return p.signature +} + +func (p *SASQueryParameters) CacheControl() string { + return p.cacheControl +} + +func (p *SASQueryParameters) ContentDisposition() string { + return p.contentDisposition +} + +func (p *SASQueryParameters) ContentEncoding() string { + return p.contentEncoding +} + +func (p *SASQueryParameters) ContentLanguage() string { + return p.contentLanguage +} + +func (p *SASQueryParameters) ContentType() string { + return p.contentType +} + +func (p *SASQueryParameters) SignedDirectoryDepth() string { + return p.signedDirectoryDepth +} + +// IPRange represents a SAS IP range's start IP and (optionally) end IP. +type IPRange struct { + Start net.IP // Not specified if length = 0 + End net.IP // Not specified if length = 0 +} + +// String returns a string representation of an IPRange. 
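+// For example (sketch): IPRange{Start: net.ParseIP("168.1.5.60"), End: net.ParseIP("168.1.5.70")}
+// renders as "168.1.5.60-168.1.5.70", while an IPRange with only Start set renders as just that address.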
+func (ipr *IPRange) String() string {
+	if len(ipr.Start) == 0 {
+		return ""
+	}
+	start := ipr.Start.String()
+	if len(ipr.End) == 0 {
+		return start
+	}
+	return start + "-" + ipr.End.String()
+}
+
+// newSASQueryParameters creates and initializes a SASQueryParameters object based on the
+// query parameter map's passed-in values. If deleteSASParametersFromValues is true,
+// all SAS-related query parameters are removed from the passed-in map. If
+// deleteSASParametersFromValues is false, the passed-in map is unaltered.
+func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters {
+	p := SASQueryParameters{}
+	for k, v := range values {
+		val := v[0]
+		isSASKey := true
+		switch strings.ToLower(k) {
+		case "sv":
+			p.version = val
+		case "ss":
+			p.services = val
+		case "srt":
+			p.resourceTypes = val
+		case "spr":
+			p.protocol = SASProtocol(val)
+		case "snapshot":
+			p.snapshotTime, _ = time.Parse(SnapshotTimeFormat, val)
+		case "st":
+			p.startTime, p.stTimeFormat, _ = parseSASTimeString(val)
+		case "se":
+			p.expiryTime, p.seTimeFormat, _ = parseSASTimeString(val)
+		case "sip":
+			dashIndex := strings.Index(val, "-")
+			if dashIndex == -1 {
+				p.ipRange.Start = net.ParseIP(val)
+			} else {
+				p.ipRange.Start = net.ParseIP(val[:dashIndex])
+				p.ipRange.End = net.ParseIP(val[dashIndex+1:])
+			}
+		case "si":
+			p.identifier = val
+		case "sr":
+			p.resource = val
+		case "sp":
+			p.permissions = val
+		case "sig":
+			p.signature = val
+		case "rscc":
+			p.cacheControl = val
+		case "rscd":
+			p.contentDisposition = val
+		case "rsce":
+			p.contentEncoding = val
+		case "rscl":
+			p.contentLanguage = val
+		case "rsct":
+			p.contentType = val
+		case "skoid":
+			p.signedOid = val
+		case "sktid":
+			p.signedTid = val
+		case "skt":
+			p.signedStart, _ = time.Parse(SASTimeFormat, val)
+		case "ske":
+			p.signedExpiry, _ = time.Parse(SASTimeFormat, val)
+		case "sks":
+			p.signedService = val
+		case "skv":
+			p.signedVersion = val
+		case "sdd":
+			p.signedDirectoryDepth = val
+		case "saoid":
+			p.preauthorizedAgentObjectId = val
+		case "suoid":
+			p.agentObjectId = val
+		case "scid":
+			p.correlationId = val
+		default:
+			isSASKey = false // We didn't recognize the query parameter
+		}
+		if isSASKey && deleteSASParametersFromValues {
+			delete(values, k)
+		}
+	}
+	return p
+}
+
+// addToValues adds the SAS components to the specified query parameters map.
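+//
+// Together with newSASQueryParameters (above) this enables a round trip: SAS
+// parameters parsed from one URL can be re-encoded onto another. Sketch
+// (parts is an assumed BlobURLParts value from NewBlobURLParts):
+//
+//	u.RawQuery = parts.SAS.Encode() // Encode calls addToValues internally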
+func (p *SASQueryParameters) addToValues(v url.Values) url.Values { + if p.version != "" { + v.Add("sv", p.version) + } + if p.services != "" { + v.Add("ss", p.services) + } + if p.resourceTypes != "" { + v.Add("srt", p.resourceTypes) + } + if p.protocol != "" { + v.Add("spr", string(p.protocol)) + } + if !p.startTime.IsZero() { + v.Add("st", formatSASTime(&(p.startTime), p.stTimeFormat)) + } + if !p.expiryTime.IsZero() { + v.Add("se", formatSASTime(&(p.expiryTime), p.seTimeFormat)) + } + if len(p.ipRange.Start) > 0 { + v.Add("sip", p.ipRange.String()) + } + if p.identifier != "" { + v.Add("si", p.identifier) + } + if p.resource != "" { + v.Add("sr", p.resource) + } + if p.permissions != "" { + v.Add("sp", p.permissions) + } + if p.signedOid != "" { + v.Add("skoid", p.signedOid) + v.Add("sktid", p.signedTid) + v.Add("skt", p.signedStart.Format(SASTimeFormat)) + v.Add("ske", p.signedExpiry.Format(SASTimeFormat)) + v.Add("sks", p.signedService) + v.Add("skv", p.signedVersion) + } + if p.signature != "" { + v.Add("sig", p.signature) + } + if p.cacheControl != "" { + v.Add("rscc", p.cacheControl) + } + if p.contentDisposition != "" { + v.Add("rscd", p.contentDisposition) + } + if p.contentEncoding != "" { + v.Add("rsce", p.contentEncoding) + } + if p.contentLanguage != "" { + v.Add("rscl", p.contentLanguage) + } + if p.contentType != "" { + v.Add("rsct", p.contentType) + } + if p.signedDirectoryDepth != "" { + v.Add("sdd", p.signedDirectoryDepth) + } + if p.preauthorizedAgentObjectId != "" { + v.Add("saoid", p.preauthorizedAgentObjectId) + } + if p.agentObjectId != "" { + v.Add("suoid", p.agentObjectId) + } + if p.correlationId != "" { + v.Add("scid", p.correlationId) + } + return v +} + +// Encode encodes the SAS query parameters into URL encoded form sorted by key. +func (p *SASQueryParameters) Encode() string { + v := url.Values{} + p.addToValues(v) + return v.Encode() +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go new file mode 100644 index 000000000..d09ddcffc --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go @@ -0,0 +1,134 @@ +package azblob + +// https://docs.microsoft.com/en-us/rest/api/storageservices/common-rest-api-error-codes + +const ( + // ServiceCodeNone is the default value. It indicates that the error was related to the service or that the service didn't return a code. + ServiceCodeNone ServiceCodeType = "" + + // ServiceCodeAccountAlreadyExists means the specified account already exists. + ServiceCodeAccountAlreadyExists ServiceCodeType = "AccountAlreadyExists" + + // ServiceCodeAccountBeingCreated means the specified account is in the process of being created (403). + ServiceCodeAccountBeingCreated ServiceCodeType = "AccountBeingCreated" + + // ServiceCodeAccountIsDisabled means the specified account is disabled (403). + ServiceCodeAccountIsDisabled ServiceCodeType = "AccountIsDisabled" + + // ServiceCodeAuthenticationFailed means the server failed to authenticate the request. Make sure the value of the Authorization header is formed correctly including the signature (403). + ServiceCodeAuthenticationFailed ServiceCodeType = "AuthenticationFailed" + + // ServiceCodeConditionHeadersNotSupported means the condition headers are not supported (400). 
+	ServiceCodeConditionHeadersNotSupported ServiceCodeType = "ConditionHeadersNotSupported"
+
+	// ServiceCodeConditionNotMet means the condition specified in the conditional header(s) was not met for a read/write operation (304/412).
+	ServiceCodeConditionNotMet ServiceCodeType = "ConditionNotMet"
+
+	// ServiceCodeEmptyMetadataKey means the key for one of the metadata key-value pairs is empty (400).
+	ServiceCodeEmptyMetadataKey ServiceCodeType = "EmptyMetadataKey"
+
+	// ServiceCodeInsufficientAccountPermissions means read operations are currently disabled, write operations are not allowed, or the account being accessed does not have sufficient permissions to execute this operation (403).
+	ServiceCodeInsufficientAccountPermissions ServiceCodeType = "InsufficientAccountPermissions"
+
+	// ServiceCodeInternalError means the server encountered an internal error. Please retry the request (500).
+	ServiceCodeInternalError ServiceCodeType = "InternalError"
+
+	// ServiceCodeInvalidAuthenticationInfo means the authentication information was not provided in the correct format. Verify the value of the Authorization header (400).
+	ServiceCodeInvalidAuthenticationInfo ServiceCodeType = "InvalidAuthenticationInfo"
+
+	// ServiceCodeInvalidHeaderValue means the value provided for one of the HTTP headers was not in the correct format (400).
+	ServiceCodeInvalidHeaderValue ServiceCodeType = "InvalidHeaderValue"
+
+	// ServiceCodeInvalidHTTPVerb means the HTTP verb specified was not recognized by the server (400).
+	ServiceCodeInvalidHTTPVerb ServiceCodeType = "InvalidHttpVerb"
+
+	// ServiceCodeInvalidInput means one of the request inputs is not valid (400).
+	ServiceCodeInvalidInput ServiceCodeType = "InvalidInput"
+
+	// ServiceCodeInvalidMd5 means the MD5 value specified in the request is invalid. The MD5 value must be 128 bits and Base64-encoded (400).
+	ServiceCodeInvalidMd5 ServiceCodeType = "InvalidMd5"
+
+	// ServiceCodeInvalidMetadata means the specified metadata is invalid. It includes characters that are not permitted (400).
+	ServiceCodeInvalidMetadata ServiceCodeType = "InvalidMetadata"
+
+	// ServiceCodeInvalidQueryParameterValue means an invalid value was specified for one of the query parameters in the request URI (400).
+	ServiceCodeInvalidQueryParameterValue ServiceCodeType = "InvalidQueryParameterValue"
+
+	// ServiceCodeInvalidRange means the range specified is invalid for the current size of the resource (416).
+	ServiceCodeInvalidRange ServiceCodeType = "InvalidRange"
+
+	// ServiceCodeInvalidResourceName means the specified resource name contains invalid characters (400).
+	ServiceCodeInvalidResourceName ServiceCodeType = "InvalidResourceName"
+
+	// ServiceCodeInvalidURI means the requested URI does not represent any resource on the server (400).
+	ServiceCodeInvalidURI ServiceCodeType = "InvalidUri"
+
+	// ServiceCodeInvalidXMLDocument means the specified XML is not syntactically valid (400).
+	ServiceCodeInvalidXMLDocument ServiceCodeType = "InvalidXmlDocument"
+
+	// ServiceCodeInvalidXMLNodeValue means the value provided for one of the XML nodes in the request body was not in the correct format (400).
+	ServiceCodeInvalidXMLNodeValue ServiceCodeType = "InvalidXmlNodeValue"
+
+	// ServiceCodeMd5Mismatch means the MD5 value specified in the request did not match the MD5 value calculated by the server (400).
+	ServiceCodeMd5Mismatch ServiceCodeType = "Md5Mismatch"
+
+	// ServiceCodeMetadataTooLarge means the size of the specified metadata exceeds the maximum size permitted (400).
+	ServiceCodeMetadataTooLarge ServiceCodeType = "MetadataTooLarge"
+
+	// ServiceCodeMissingContentLengthHeader means the Content-Length header was not specified (411).
+	ServiceCodeMissingContentLengthHeader ServiceCodeType = "MissingContentLengthHeader"
+
+	// ServiceCodeMissingRequiredQueryParameter means a required query parameter was not specified for this request (400).
+	ServiceCodeMissingRequiredQueryParameter ServiceCodeType = "MissingRequiredQueryParameter"
+
+	// ServiceCodeMissingRequiredHeader means a required HTTP header was not specified (400).
+	ServiceCodeMissingRequiredHeader ServiceCodeType = "MissingRequiredHeader"
+
+	// ServiceCodeMissingRequiredXMLNode means a required XML node was not specified in the request body (400).
+	ServiceCodeMissingRequiredXMLNode ServiceCodeType = "MissingRequiredXmlNode"
+
+	// ServiceCodeMultipleConditionHeadersNotSupported means multiple condition headers are not supported (400).
+	ServiceCodeMultipleConditionHeadersNotSupported ServiceCodeType = "MultipleConditionHeadersNotSupported"
+
+	// ServiceCodeOperationTimedOut means the operation could not be completed within the permitted time (500).
+	ServiceCodeOperationTimedOut ServiceCodeType = "OperationTimedOut"
+
+	// ServiceCodeOutOfRangeInput means one of the request inputs is out of range (400).
+	ServiceCodeOutOfRangeInput ServiceCodeType = "OutOfRangeInput"
+
+	// ServiceCodeOutOfRangeQueryParameterValue means a query parameter specified in the request URI is outside the permissible range (400).
+	ServiceCodeOutOfRangeQueryParameterValue ServiceCodeType = "OutOfRangeQueryParameterValue"
+
+	// ServiceCodeRequestBodyTooLarge means the size of the request body exceeds the maximum size permitted (413).
+	ServiceCodeRequestBodyTooLarge ServiceCodeType = "RequestBodyTooLarge"
+
+	// ServiceCodeResourceTypeMismatch means the specified resource type does not match the type of the existing resource (409).
+	ServiceCodeResourceTypeMismatch ServiceCodeType = "ResourceTypeMismatch"
+
+	// ServiceCodeRequestURLFailedToParse means the url in the request could not be parsed (400).
+	ServiceCodeRequestURLFailedToParse ServiceCodeType = "RequestUrlFailedToParse"
+
+	// ServiceCodeResourceAlreadyExists means the specified resource already exists (409).
+	ServiceCodeResourceAlreadyExists ServiceCodeType = "ResourceAlreadyExists"
+
+	// ServiceCodeResourceNotFound means the specified resource does not exist (404).
+	ServiceCodeResourceNotFound ServiceCodeType = "ResourceNotFound"
+
+	// ServiceCodeNoAuthenticationInformation means the specified authentication for the resource does not exist (401).
+	ServiceCodeNoAuthenticationInformation ServiceCodeType = "NoAuthenticationInformation"
+
+	// ServiceCodeServerBusy means the server is currently unable to receive requests. Please retry your request; ingress/egress or operations per second may be over the account limit (503).
+	ServiceCodeServerBusy ServiceCodeType = "ServerBusy"
+
+	// ServiceCodeUnsupportedHeader means one of the HTTP headers specified in the request is not supported (400).
+	ServiceCodeUnsupportedHeader ServiceCodeType = "UnsupportedHeader"
+
+	// ServiceCodeUnsupportedXMLNode means one of the XML nodes specified in the request body is not supported (400).
+	ServiceCodeUnsupportedXMLNode ServiceCodeType = "UnsupportedXmlNode"
+
+	// ServiceCodeUnsupportedQueryParameter means one of the query parameters specified in the request URI is not supported (400).
+	ServiceCodeUnsupportedQueryParameter ServiceCodeType = "UnsupportedQueryParameter"
+
+	// ServiceCodeUnsupportedHTTPVerb means the resource doesn't support the specified HTTP verb (405).
+	ServiceCodeUnsupportedHTTPVerb ServiceCodeType = "UnsupportedHttpVerb"
+)
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go
new file mode 100644
index 000000000..a3cbd9817
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go
@@ -0,0 +1,111 @@
+package azblob
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"net/http"
+	"sort"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+func init() {
+	// wire up our custom error handling constructor
+	responseErrorFactory = newStorageError
+}
+
+// ServiceCodeType is a string identifying a storage service error.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2
+type ServiceCodeType string
+
+// StorageError identifies a responder-generated network or response parsing error.
+type StorageError interface {
+	// ResponseError implements error's Error(), net.Error's Temporary() and Timeout() methods & Response().
+	ResponseError
+
+	// ServiceCode returns a service error code. Your code can use this to make error recovery decisions.
+	ServiceCode() ServiceCodeType
+}
+
+// storageError is the internal struct that implements the public StorageError interface.
+type storageError struct {
+	responseError
+	serviceCode ServiceCodeType
+	details     map[string]string
+}
+
+// newStorageError creates an error object that implements the error interface.
+func newStorageError(cause error, response *http.Response, description string) error {
+	return &storageError{
+		responseError: responseError{
+			ErrorNode:   pipeline.ErrorNode{}.Initialize(cause, 3),
+			response:    response,
+			description: description,
+		},
+		serviceCode: ServiceCodeType(response.Header.Get("x-ms-error-code")),
+	}
+}
+
+// ServiceCode returns service-error information. The caller may examine these values but should not modify any of them.
+func (e *storageError) ServiceCode() ServiceCodeType {
+	return e.serviceCode
+}
+
+// Error implements the error interface's Error method to return a string representation of the error.
+func (e *storageError) Error() string {
+	b := &bytes.Buffer{}
+	fmt.Fprintf(b, "===== RESPONSE ERROR (ServiceCode=%s) =====\n", e.serviceCode)
+	fmt.Fprintf(b, "Description=%s, Details: ", e.description)
+	if len(e.details) == 0 {
+		b.WriteString("(none)\n")
+	} else {
+		b.WriteRune('\n')
+		keys := make([]string, 0, len(e.details))
+		// Alphabetize the details
+		for k := range e.details {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+		for _, k := range keys {
+			fmt.Fprintf(b, "   %s: %+v\n", k, e.details[k])
+		}
+	}
+	req := pipeline.Request{Request: e.response.Request}.Copy() // Make a copy of the response's request
+	pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(req), e.response, nil)
+	return e.ErrorNode.Error(b.String())
+}
+
+// Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500, 502, or 503).
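+//
+// A caller might combine Temporary with ServiceCode like this (illustrative
+// sketch; err is an assumed error returned by a blob operation):
+//
+//	if serr, ok := err.(StorageError); ok {
+//		switch serr.ServiceCode() {
+//		case ServiceCodeResourceNotFound:
+//			// handle 404
+//		case ServiceCodeServerBusy:
+//			// back off and retry
+//		}
+//	}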
+func (e *storageError) Temporary() bool {
+	if e.response != nil {
+		if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) || (e.response.StatusCode == http.StatusBadGateway) {
+			return true
+		}
+	}
+	return e.ErrorNode.Temporary()
+}
+
+// UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors.
+func (e *storageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
+	tokName := ""
+	var t xml.Token
+	for t, err = d.Token(); err == nil; t, err = d.Token() {
+		switch tt := t.(type) {
+		case xml.StartElement:
+			tokName = tt.Name.Local
+		case xml.CharData:
+			switch tokName {
+			case "Message":
+				e.description = string(tt)
+			default:
+				if e.details == nil {
+					e.details = map[string]string{}
+				}
+				e.details[tokName] = string(tt)
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_util_validate.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_util_validate.go
new file mode 100644
index 000000000..d7b2507e4
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_util_validate.go
@@ -0,0 +1,64 @@
+package azblob
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// httpRange defines a range of bytes within an HTTP resource, starting at offset and
+// ending at offset+count. A zero-value httpRange indicates the entire resource. An httpRange
+// which has an offset but a zero value count indicates from the offset to the resource's end.
+type httpRange struct {
+	offset int64
+	count  int64
+}
+
+func (r httpRange) pointers() *string {
+	if r.offset == 0 && r.count == CountToEnd { // Do common case first for performance
+		return nil // No specified range
+	}
+	endOffset := "" // if count == CountToEnd (0)
+	if r.count > 0 {
+		endOffset = strconv.FormatInt((r.offset+r.count)-1, 10)
+	}
+	dataRange := fmt.Sprintf("bytes=%v-%s", r.offset, endOffset)
+	return &dataRange
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) {
+	if body == nil { // nil bodies are "logically" seekable to 0 and are 0 bytes long
+		return 0, nil
+	}
+
+	err := validateSeekableStreamAt0(body)
+	if err != nil {
+		return 0, err
+	}
+
+	count, err := body.Seek(0, io.SeekEnd)
+	if err != nil {
+		return 0, errors.New("body stream must be seekable")
+	}
+
+	body.Seek(0, io.SeekStart) // Rewind the stream back to the beginning
+	return count, nil
+}
+
+// validateSeekableStreamAt0 returns an error if body is not a seekable stream positioned at 0.
+func validateSeekableStreamAt0(body io.ReadSeeker) error {
+	if body == nil { // nil bodies are "logically" seekable to 0
+		return nil
+	}
+	if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
+		// Help detect programmer error
+		if err != nil {
+			return errors.New("body stream must be seekable")
+		}
+		return errors.New("body stream must be set to position 0")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_uuid.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_uuid.go
new file mode 100644
index 000000000..66799f9cb
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_uuid.go
@@ -0,0 +1,77 @@
+package azblob
+
+import (
+	"crypto/rand"
+	"fmt"
+	"strconv"
+)
+
+// The UUID reserved variants.
+const (
+	reservedNCS       byte = 0x80
+	reservedRFC4122   byte = 0x40
+	reservedMicrosoft byte = 0x20
+	reservedFuture    byte = 0x00
+)
+
+// A UUID representation compliant with the specification in RFC 4122.
+type uuid [16]byte
+
+// newUUID returns a new uuid using the RFC 4122 algorithm.
+func newUUID() (u uuid) {
+	u = uuid{}
+	// Set all bits to randomly (or pseudo-randomly) chosen values.
+	rand.Read(u[:])
+	u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122)
+
+	var version byte = 4
+	u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4)
+	return
+}
+
+// String returns an unparsed version of the generated UUID sequence.
+func (u uuid) String() string {
+	return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
+
+// parseUUID parses a string formatted as "03020100-0504-0706-0809-0a0b0c0d0e0f"
+// or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID.
+func parseUUID(uuidStr string) uuid {
+	char := func(hexString string) byte {
+		i, _ := strconv.ParseUint(hexString, 16, 8)
+		return byte(i)
+	}
+	if uuidStr[0] == '{' {
+		uuidStr = uuidStr[1:] // Skip over the '{'
+	}
+	// 03020100 - 05 04 - 07 06 - 08 09 - 0a 0b 0c 0d 0e 0f
+	// 1 11 1 11 11 1 12 22 2 22 22 22 33 33 33
+	// 01234567 8 90 12 3 45 67 8 90 12 3 45 67 89 01 23 45
+	uuidVal := uuid{
+		char(uuidStr[0:2]),
+		char(uuidStr[2:4]),
+		char(uuidStr[4:6]),
+		char(uuidStr[6:8]),
+
+		char(uuidStr[9:11]),
+		char(uuidStr[11:13]),
+
+		char(uuidStr[14:16]),
+		char(uuidStr[16:18]),
+
+		char(uuidStr[19:21]),
+		char(uuidStr[21:23]),
+
+		char(uuidStr[24:26]),
+		char(uuidStr[26:28]),
+		char(uuidStr[28:30]),
+		char(uuidStr[30:32]),
+		char(uuidStr[32:34]),
+		char(uuidStr[34:36]),
+	}
+	return uuidVal
+}
+
+func (u uuid) bytes() []byte {
+	return u[:]
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zt_doc.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zt_doc.go
new file mode 100644
index 000000000..6b3779c0e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zt_doc.go
@@ -0,0 +1,89 @@
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+Package azblob allows you to manipulate Azure Storage containers and blob objects.
+
+URL Types
+
+The most common types you'll work with are the XxxURL types. The methods of these types make requests
+against the Azure Storage Service.
+
+ - ServiceURL's methods perform operations on a storage account.
+ - ContainerURL's methods perform operations on an account's container.
+ - BlockBlobURL's methods perform operations on a container's block blob.
+ - AppendBlobURL's methods perform operations on a container's append blob.
+ - PageBlobURL's methods perform operations on a container's page blob.
+ - BlobURL's methods perform operations on a container's blob regardless of the blob's type.
+
+Internally, each XxxURL object contains a URL and a request pipeline. The URL indicates the endpoint where each HTTP
+request is sent and the pipeline indicates how the outgoing HTTP request and incoming HTTP response is processed.
+The pipeline specifies things like retry policies, logging, deserialization of HTTP response payloads, and more.
+
+Pipelines are threadsafe and may be shared by multiple XxxURL objects. When you create a ServiceURL, you pass
+an initial pipeline.
+When you call ServiceURL's NewContainerURL method, the new ContainerURL object has its own
+URL but it shares the same pipeline as the parent ServiceURL object.
+
+To work with a blob, call one of ContainerURL's 4 NewXxxBlobURL methods depending on how you want to treat the blob.
+To treat the blob as a block blob, append blob, or page blob, call NewBlockBlobURL, NewAppendBlobURL, or NewPageBlobURL
+respectively. These three types are all identical except for the methods they expose; each type exposes the methods
+relevant to the type of blob represented. If you're not sure how you want to treat a blob, you can call NewBlobURL;
+this returns an object whose methods are relevant to any kind of blob. When you call ContainerURL's NewXxxBlobURL,
+the new XxxBlobURL object has its own URL but it shares the same pipeline as the parent ContainerURL object. You
+can easily switch between blob types (method sets) by calling a ToXxxBlobURL method.
+
+If you'd like to use a different pipeline with a ServiceURL, ContainerURL, or XxxBlobURL object, then call the XxxURL
+object's WithPipeline method passing in the desired pipeline. The WithPipeline methods create a new XxxURL object
+with the same URL as the original but with the specified pipeline.
+
+Note that XxxURL objects use little memory, are goroutine-safe, and many objects share the same pipeline. This means that
+XxxURL objects share a lot of system resources, making them very efficient.
+
+All of XxxURL's methods that make HTTP requests return rich error handling information so you can discern network failures,
+transient failures, timeout failures, service failures, etc. See the StorageError interface for more information and an
+example of how to deal with errors.
+
+URL and Shared Access Signature Manipulation
+
+The library includes a BlobURLParts type for deconstructing and reconstructing URLs. And you can use the following types
+for generating and parsing Shared Access Signatures (SAS):
+ - Use the AccountSASSignatureValues type to create a SAS for a storage account.
+ - Use the BlobSASSignatureValues type to create a SAS for a container or blob.
+ - Use the SASQueryParameters type to turn signature values into query parameters or to parse query parameters.
+
+To generate a SAS, you must use the SharedKeyCredential type.
+
+Credentials
+
+When creating a request pipeline, you must specify one of this package's credential types.
+ - Call the NewAnonymousCredential function for requests that contain a Shared Access Signature (SAS).
+ - Call the NewSharedKeyCredential function (with an account name & key) to access any account resources. You must also use this
+   to generate Shared Access Signatures.
+
+HTTP Request Policy Factories
+
+This package defines several request policy factories for use with the pipeline package.
+Most applications will not use these factories directly; instead, the NewPipeline
+function creates these factories, initializes them (via the PipelineOptions type)
+and returns a pipeline object for use by the XxxURL objects.
+
+However, for advanced scenarios, developers can access these policy factories directly
+and even create their own and then construct their own pipeline in order to affect HTTP
+requests and responses performed by the XxxURL objects. For example, developers can
+introduce their own logging, random failures, request recording & playback for fast
+testing, HTTP request pacing, alternate retry mechanisms, metering, metrics, etc. The
+possibilities are endless!
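+
+For example, a typical setup reads like this (an illustrative sketch; the account
+name, key, container and blob names are placeholders, and error handling is elided):
+
+	credential, _ := NewSharedKeyCredential("myaccount", "<account key>")
+	p := NewPipeline(credential, PipelineOptions{})
+	u, _ := url.Parse("https://myaccount.blob.core.windows.net")
+	serviceURL := NewServiceURL(*u, p)
+	containerURL := serviceURL.NewContainerURL("mycontainer")
+	blobURL := containerURL.NewBlockBlobURL("myblob")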
+
+Below are the request pipeline policy factory functions that are provided with this
+package:
+ - NewRetryPolicyFactory Enables rich retry semantics for failed HTTP requests.
+ - NewRequestLogPolicyFactory Enables rich logging support for HTTP requests/responses & failures.
+ - NewTelemetryPolicyFactory Enables simple modification of the HTTP request's User-Agent header so each request reports the SDK version & language/runtime making the requests.
+ - NewUniqueRequestIDPolicyFactory Adds an x-ms-client-request-id header with a unique UUID value to an HTTP request to help with diagnosing failures.
+
+Also, note that all the NewXxxCredential functions return request policy factory objects which get injected into the pipeline.
+*/
+package azblob
+
+// TokenCredential: use this to access resources using Role-Based Access Control (RBAC).
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go
new file mode 100644
index 000000000..9a0144bf5
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go
@@ -0,0 +1,532 @@
+package azblob
+
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"encoding/base64"
+	"github.com/Azure/azure-pipeline-go/pipeline"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+)
+
+// appendBlobClient is the client for the AppendBlob methods of the Azblob service.
+type appendBlobClient struct {
+	managementClient
+}
+
+// newAppendBlobClient creates an instance of the appendBlobClient client.
+func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient {
+	return appendBlobClient{newManagementClient(url, p)}
+}
+
+// AppendBlock the Append Block operation commits a new block of data to the end of an existing append blob. The Append
+// Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is
+// supported only on version 2015-02-21 version or later.
+//
+// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an
+// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
+// information, see Setting
+// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to
+// be validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to be
+// validated by the service. leaseID is if specified, the operation only succeeds if the resource's lease is active and
+// matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob. If
+// the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than the
+// value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 -
+// Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation. A
+// number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to this
+// number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 -
+// Precondition Failed).
encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in +// the request. If not specified, encryption is performed with the root account encryption key. For more information, +// see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided +// encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm +// used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the +// x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the +// name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is +// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage +// Services. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the +// specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been +// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching +// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a +// SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. +func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*AppendBlobAppendBlockResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, transactionalContentCrc64, leaseID, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockResponder}, req) + if err != nil { + return nil, err + } + return resp.(*AppendBlobAppendBlockResponse), err +} + +// appendBlockPreparer prepares the AppendBlock request. 
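+//
+// The prepared request has roughly this shape (an illustrative sketch of the
+// code below, not generator output):
+//
+//	PUT <blob URL>?comp=appendblock[&timeout=n]
+//	Content-Length: <contentLength>
+//	x-ms-version: <ServiceVersion>
+//	(plus the transactional-hash, lease, conditional and encryption headers
+//	when their corresponding arguments are non-nil)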
+func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "appendblock") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if transactionalContentCrc64 != nil { + req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if maxSize != nil { + req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10)) + } + if appendPosition != nil { + req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10)) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// appendBlockResponder handles the response to the AppendBlock request. +func (client appendBlobClient) appendBlockResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &AppendBlobAppendBlockResponse{rawResponse: resp.Response()}, err +} + +// AppendBlockFromURL the Append Block operation commits a new block of data to the end of an existing append blob +// where the contents are read from a source url. The Append Block operation is permitted only if the blob was created +// with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. +// +// sourceURL is specify a URL to the copy source. contentLength is the length of the request. sourceRange is bytes of +// source data in the specified range. 
sourceContentMD5 is specify the md5 calculated for the range of bytes that must +// be read from the copy source. sourceContentcrc64 is specify the crc64 calculated for the range of bytes that must be +// read from the copy source. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to +// be validated by the service. encryptionKey is optional. Specifies the encryption key to use to encrypt the data +// provided in the request. If not specified, encryption is performed with the root account encryption key. For more +// information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the +// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the +// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided +// if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies +// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is +// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage +// Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this +// ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob. If the Append +// Block operation would cause the blob to exceed that limit or if the blob size is already greater than the value +// specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - +// Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation. A +// number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to this +// number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - +// Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been modified +// since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has +// not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a +// matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is +// specify a SQL where clause on blob tags to operate only on blobs with a matching value. sourceIfModifiedSince is +// specify this header value to operate only on a blob if it has been modified since the specified date/time. +// sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the +// specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a matching value. +// sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides +// a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. copySourceAuthorization is only Bearer type is supported. Credentials should be a +// valid OAuth access token to copy source. 
+func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (*AppendBlobAppendBlockFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.appendBlockFromURLPreparer(sourceURL, contentLength, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, transactionalContentMD5, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID, copySourceAuthorization) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*AppendBlobAppendBlockFromURLResponse), err +} + +// appendBlockFromURLPreparer prepares the AppendBlockFromURL request. 
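+//
+// Unlike appendBlockPreparer, the request below carries no body: the service
+// pulls the data itself from the URL given in x-ms-copy-source (sketch):
+//
+//	PUT <blob URL>?comp=appendblock
+//	x-ms-copy-source: <sourceURL>
+//	x-ms-source-range: <sourceRange, when non-nil>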
+func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "appendblock") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-copy-source", sourceURL) + if sourceRange != nil { + req.Header.Set("x-ms-source-range", *sourceRange) + } + if sourceContentMD5 != nil { + req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) + } + if sourceContentcrc64 != nil { + req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64)) + } + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if maxSize != nil { + req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10)) + } + if appendPosition != nil { + req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + if sourceIfModifiedSince != nil { + req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfUnmodifiedSince != nil { + req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfMatch != nil { + req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) + } + if sourceIfNoneMatch != nil { + req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if copySourceAuthorization != nil { + 
req.Header.Set("x-ms-copy-source-authorization", *copySourceAuthorization) + } + return req, nil +} + +// appendBlockFromURLResponder handles the response to the AppendBlockFromURL request. +func (client appendBlobClient) appendBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &AppendBlobAppendBlockFromURLResponse{rawResponse: resp.Response()}, err +} + +// Create the Create Append Blob operation creates a new append blob. +// +// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. blobContentType is optional. Sets the blob's content type. If specified, +// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the +// blob's content encoding. If specified, this property is stored with the blob and returned with a read request. +// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the +// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this +// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded. +// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and +// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the +// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the +// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified +// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, +// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and +// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is +// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. +// encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not +// specified, encryption is performed with the root account encryption key. For more information, see Encryption at +// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be +// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the +// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key +// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption +// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default +// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. ifModifiedSince +// is specify this header value to operate only on a blob if it has been modified since the specified date/time. 
+// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the +// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is +// specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on +// blob tags to operate only on blobs with a matching value. requestID is provides a client-generated, opaque value +// with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. +// blobTagsString is optional. Used to set blob tags in various blob operations. immutabilityPolicyExpiry is specifies +// the date time when the blobs immutability policy is set to expire. immutabilityPolicyMode is specifies the +// immutability policy mode to set on the blob. legalHold is specified if a legal hold should be set on the blob. +func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (*AppendBlobCreateResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) + if err != nil { + return nil, err + } + return resp.(*AppendBlobCreateResponse), err +} + +// createPreparer prepares the Create request. 
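+//
+// Editor's sketch (not AutoRest-generated): stripped of all optional headers,
+// the preparer below reduces to a bare Put Blob call typed as an append blob:
+//
+//	PUT <blob-url> HTTP/1.1
+//	Content-Length: <contentLength>
+//	x-ms-version: <ServiceVersion>
+//	x-ms-blob-type: AppendBlob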
+func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } + if immutabilityPolicyExpiry != nil { + req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) + } + if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { + req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) + } + if legalHold != nil { + req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) + } + req.Header.Set("x-ms-blob-type", "AppendBlob") + return req, nil +} + +// createResponder handles the response to the Create request. 
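+//
+// Editor's note: like every responder in this generated file, it validates
+// the status code, then drains and closes the response body so the underlying
+// HTTP connection can be reused even though the payload is discarded.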
+func (client appendBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) {
+	err := validateResponse(resp, http.StatusOK, http.StatusCreated)
+	if resp == nil {
+		return nil, err
+	}
+	io.Copy(ioutil.Discard, resp.Response().Body)
+	resp.Response().Body.Close()
+	return &AppendBlobCreateResponse{rawResponse: resp.Response()}, err
+}
+
+// Seal the Seal operation seals the Append Blob to make it read-only. Seal is supported only on service version
+// 2019-12-12 or later.
+//
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB
+// character limit that is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if
+// specified, the operation only succeeds if the resource's lease is active and matches this ID. ifModifiedSince is
+// specify this header value to operate only on a blob if it has been modified since the specified date/time.
+// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
+// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is
+// specify an ETag value to operate only on blobs without a matching value. appendPosition is an optional conditional
+// header, used only for the Append Block operation. A number indicating the byte offset to compare. Append Block will
+// succeed only if the append position is equal to this number. If it is not, the request will fail with the
+// AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed).
+func (client appendBlobClient) Seal(ctx context.Context, timeout *int32, requestID *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, appendPosition *int64) (*AppendBlobSealResponse, error) {
+	if err := validate([]validation{
+		{targetValue: timeout,
+			constraints: []constraint{{target: "timeout", name: null, rule: false,
+				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil {
+		return nil, err
+	}
+	req, err := client.sealPreparer(timeout, requestID, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, appendPosition)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.sealResponder}, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*AppendBlobSealResponse), err
+}
+
+// sealPreparer prepares the Seal request.
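+//
+// Editor's sketch (not AutoRest-generated): with every optional condition
+// nil, the preparer below produces just:
+//
+//	PUT <blob-url>?comp=seal HTTP/1.1
+//	x-ms-version: <ServiceVersion>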
+func (client appendBlobClient) sealPreparer(timeout *int32, requestID *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, appendPosition *int64) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "seal") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if appendPosition != nil { + req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10)) + } + return req, nil +} + +// sealResponder handles the response to the Seal request. +func (client appendBlobClient) sealResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &AppendBlobSealResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go new file mode 100644 index 000000000..6e3b9a207 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go @@ -0,0 +1,2063 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" +) + +// blobClient is the client for the Blob methods of the Azblob service. +type blobClient struct { + managementClient +} + +// newBlobClient creates an instance of the blobClient client. +func newBlobClient(url url.URL, p pipeline.Pipeline) blobClient { + return blobClient{newManagementClient(url, p)} +} + +// AbortCopyFromURL the Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a +// destination blob with zero length and full metadata. +// +// copyID is the copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. timeout is +// the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. 
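+//
+// Editor's sketch (not AutoRest-generated): a minimal call, assuming `client`
+// is an initialized blobClient, `ctx` is a context.Context, and `copyID` holds
+// the x-ms-copy-id value from the original copy; timeout, leaseID and
+// requestID are left nil:
+//
+//	resp, err := client.AbortCopyFromURL(ctx, copyID, nil, nil, nil)
+//	if err != nil {
+//		return err
+//	}
+//	_ = resp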
+func (client blobClient) AbortCopyFromURL(ctx context.Context, copyID string, timeout *int32, leaseID *string, requestID *string) (*BlobAbortCopyFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.abortCopyFromURLPreparer(copyID, timeout, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.abortCopyFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobAbortCopyFromURLResponse), err +} + +// abortCopyFromURLPreparer prepares the AbortCopyFromURL request. +func (client blobClient) abortCopyFromURLPreparer(copyID string, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("copyid", copyID) + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "copy") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-copy-action", "abort") + return req, nil +} + +// abortCopyFromURLResponder handles the response to the AbortCopyFromURL request. +func (client blobClient) abortCopyFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusNoContent) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobAbortCopyFromURLResponse{rawResponse: resp.Response()}, err +} + +// AcquireLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. duration is specifies the duration of the lease, in seconds, or negative +// one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration +// cannot be changed using renew or change. proposedLeaseID is proposed lease ID, in a GUID string format. The Blob +// service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor +// (String) for a list of valid GUID string formats. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. 
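+//
+// Editor's sketch (not AutoRest-generated): acquiring an infinite lease with
+// no access conditions, assuming `client` is an initialized blobClient and
+// `ctx` is a context.Context; -1 requests a lease that never expires:
+//
+//	duration := int32(-1)
+//	resp, err := client.AcquireLease(ctx, nil, &duration, nil, nil, nil, nil, nil, nil, nil)
+//	if err != nil {
+//		return err
+//	}
+//	leaseID := resp.LeaseID() // accessor generated in zz_generated_models.go
+//	_ = leaseID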
+func (client blobClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobAcquireLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.acquireLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobAcquireLeaseResponse), err +} + +// acquireLeasePreparer prepares the AcquireLease request. +func (client blobClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + if duration != nil { + req.Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*duration), 10)) + } + if proposedLeaseID != nil { + req.Header.Set("x-ms-proposed-lease-id", *proposedLeaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "acquire") + return req, nil +} + +// acquireLeaseResponder handles the response to the AcquireLease request. +func (client blobClient) acquireLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobAcquireLeaseResponse{rawResponse: resp.Response()}, err +} + +// BreakLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. breakPeriod is for a break operation, proposed duration the lease should +// continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the +// time remaining on the lease. If longer, the time remaining on the lease is used. 
A new lease will not be available +// before the break period has expired, but the lease may be held for longer than the break period. If this header does +// not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an +// infinite lease breaks immediately. ifModifiedSince is specify this header value to operate only on a blob if it has +// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on +// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. +// ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. requestID is +// provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when +// storage analytics logging is enabled. +func (client blobClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobBreakLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.breakLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobBreakLeaseResponse), err +} + +// breakLeasePreparer prepares the BreakLease request. +func (client blobClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + if breakPeriod != nil { + req.Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*breakPeriod), 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "break") + return req, nil +} + +// breakLeaseResponder handles the response to the BreakLease request. 
+func (client blobClient) breakLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobBreakLeaseResponse{rawResponse: resp.Response()}, err +} + +// ChangeLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// leaseID is specifies the current lease ID on the resource. proposedLeaseID is proposed lease ID, in a GUID string +// format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See +// Guid Constructor (String) for a list of valid GUID string formats. timeout is the timeout parameter is expressed in +// seconds. For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. +func (client blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobChangeLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.changeLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobChangeLeaseResponse), err +} + +// changeLeasePreparer prepares the ChangeLease request. 
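+//
+// Editor's sketch (not AutoRest-generated): with no access conditions set,
+// the preparer below produces roughly:
+//
+//	PUT <blob-url>?comp=lease HTTP/1.1
+//	x-ms-lease-id: <leaseID>
+//	x-ms-proposed-lease-id: <proposedLeaseID>
+//	x-ms-version: <ServiceVersion>
+//	x-ms-lease-action: change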
+func (client blobClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + req.Header.Set("x-ms-proposed-lease-id", proposedLeaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "change") + return req, nil +} + +// changeLeaseResponder handles the response to the ChangeLease request. +func (client blobClient) changeLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobChangeLeaseResponse{rawResponse: resp.Response()}, err +} + +// CopyFromURL the Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a +// response until the copy is complete. +// +// copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that +// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob +// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is +// expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. tier is optional. Indicates the tier to be set on the blob. +// sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified since the +// specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not +// been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a +// matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. 
+// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL +// where clause on blob tags to operate only on blobs with a matching value. leaseID is if specified, the operation +// only succeeds if the resource's lease is active and matches this ID. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. sourceContentMD5 is specify the md5 calculated for the range of bytes that must be read from the copy +// source. blobTagsString is optional. Used to set blob tags in various blob operations. immutabilityPolicyExpiry is +// specifies the date time when the blobs immutability policy is set to expire. immutabilityPolicyMode is specifies the +// immutability policy mode to set on the blob. legalHold is specified if a legal hold should be set on the blob. +// copySourceAuthorization is only Bearer type is supported. Credentials should be a valid OAuth access token to copy +// source. +func (client blobClient) CopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, sourceContentMD5 []byte, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool, copySourceAuthorization *string) (*BlobCopyFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.copyFromURLPreparer(copySource, timeout, metadata, tier, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, leaseID, requestID, sourceContentMD5, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold, copySourceAuthorization) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.copyFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobCopyFromURLResponse), err +} + +// copyFromURLPreparer prepares the CopyFromURL request. 
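+//
+// Editor's sketch (not AutoRest-generated): stripped of every optional
+// parameter, the preparer below requests a synchronous server-side copy:
+//
+//	PUT <blob-url> HTTP/1.1
+//	x-ms-copy-source: <copySource>
+//	x-ms-version: <ServiceVersion>
+//	x-ms-requires-sync: true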
+func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, sourceContentMD5 []byte, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool, copySourceAuthorization *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if tier != AccessTierNone { + req.Header.Set("x-ms-access-tier", string(tier)) + } + if sourceIfModifiedSince != nil { + req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfUnmodifiedSince != nil { + req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfMatch != nil { + req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) + } + if sourceIfNoneMatch != nil { + req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-copy-source", copySource) + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if sourceContentMD5 != nil { + req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) + } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } + if immutabilityPolicyExpiry != nil { + req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) + } + if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { + req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) + } + if legalHold != nil { + req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) + } + if copySourceAuthorization != nil { + req.Header.Set("x-ms-copy-source-authorization", *copySourceAuthorization) + } + req.Header.Set("x-ms-requires-sync", "true") + return req, nil +} + +// copyFromURLResponder handles the response to the CopyFromURL request. 
+func (client blobClient) copyFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobCopyFromURLResponse{rawResponse: resp.Response()}, err +} + +// CreateSnapshot the Create Snapshot operation creates a read-only snapshot of a blob +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. encryptionKey is optional. Specifies the encryption key to use +// to encrypt the data provided in the request. If not specified, encryption is performed with the root account +// encryption key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the +// SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. +// encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is +// "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version +// 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the +// request. If not specified, encryption is performed with the default account encryption scope. For more information, +// see Encryption at Rest for Azure Storage Services. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. 
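+//
+// Editor's sketch (not AutoRest-generated): a snapshot taken with service
+// defaults, assuming `client` is an initialized blobClient and `ctx` is a
+// context.Context; EncryptionAlgorithmNone and the nil arguments leave every
+// option unset:
+//
+//	resp, err := client.CreateSnapshot(ctx, nil, nil, nil, nil, EncryptionAlgorithmNone, nil, nil, nil, nil, nil, nil, nil, nil)
+//	if err != nil {
+//		return err
+//	}
+//	_ = resp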
+func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, metadata map[string]string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string) (*BlobCreateSnapshotResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.createSnapshotPreparer(timeout, metadata, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createSnapshotResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobCreateSnapshotResponse), err +} + +// createSnapshotPreparer prepares the CreateSnapshot request. +func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[string]string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "snapshot") + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// createSnapshotResponder handles the response to the CreateSnapshot request. 
+func (client blobClient) createSnapshotResponder(resp pipeline.Response) (pipeline.Response, error) {
+	err := validateResponse(resp, http.StatusOK, http.StatusCreated)
+	if resp == nil {
+		return nil, err
+	}
+	io.Copy(ioutil.Discard, resp.Response().Body)
+	resp.Response().Body.Close()
+	return &BlobCreateSnapshotResponse{rawResponse: resp.Response()}, err
+}
+
+// Delete if the storage account's soft delete feature is disabled, then, when a blob is deleted, it is permanently
+// removed from the storage account. If the storage account's soft delete feature is enabled, then, when a blob is
+// deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service retains the blob
+// or snapshot for the number of days specified by the DeleteRetentionPolicy section of [Storage service properties]
+// (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's data is permanently
+// removed from the storage account. Note that you continue to be charged for the soft-deleted blob's storage until it
+// is permanently removed. Use the List Blobs API and specify the "include=deleted" query parameter to discover which
+// blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob.
+// All other operations on a soft-deleted blob or snapshot cause the service to return an HTTP status code of 404
+// (ResourceNotFound).
+//
+// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to
+// retrieve. For more information on working with blob snapshots, see Creating
+// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present,
+// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the
+// timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's
+// lease is active and matches this ID. deleteSnapshots is required if the blob has associated snapshots. Specify one
+// of the following two options: include: Delete the base blob and all of its snapshots. only: Delete only the blob's
+// snapshots and not the blob itself. ifModifiedSince is specify this header value to operate only on a blob if it has
+// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
+// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
+// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
+// ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. requestID is
+// provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when
+// storage analytics logging is enabled. blobDeleteType is optional. Only possible value is 'permanent', which
+// specifies to permanently delete a blob if blob soft delete is enabled.
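+//
+// Editor's sketch (not AutoRest-generated): deleting a base blob with service
+// defaults, assuming `client` is an initialized blobClient and `ctx` is a
+// context.Context; both enum parameters take their zero values, so a blob
+// that has snapshots would need an explicit deleteSnapshots option instead:
+//
+//	_, err := client.Delete(ctx, nil, nil, nil, nil, DeleteSnapshotsOptionNone, nil, nil, nil, nil, nil, nil, BlobDeleteNone)
+//	if err != nil {
+//		return err
+//	}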
+func (client blobClient) Delete(ctx context.Context, snapshot *string, versionID *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobDeleteType BlobDeleteType) (*BlobDeleteResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.deletePreparer(snapshot, versionID, timeout, leaseID, deleteSnapshots, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobDeleteType) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobDeleteResponse), err +} + +// deletePreparer prepares the Delete request. +func (client blobClient) deletePreparer(snapshot *string, versionID *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobDeleteType BlobDeleteType) (pipeline.Request, error) { + req, err := pipeline.NewRequest("DELETE", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + if blobDeleteType != BlobDeleteNone { + params.Set("deletetype", string(blobDeleteType)) + } + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if deleteSnapshots != DeleteSnapshotsOptionNone { + req.Header.Set("x-ms-delete-snapshots", string(deleteSnapshots)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// deleteResponder handles the response to the Delete request. +func (client blobClient) deleteResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobDeleteResponse{rawResponse: resp.Response()}, err +} + +// DeleteImmutabilityPolicy the Delete Immutability Policy operation deletes the immutability policy on the blob +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. 
requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client blobClient) DeleteImmutabilityPolicy(ctx context.Context, timeout *int32, requestID *string) (*BlobDeleteImmutabilityPolicyResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.deleteImmutabilityPolicyPreparer(timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteImmutabilityPolicyResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobDeleteImmutabilityPolicyResponse), err +} + +// deleteImmutabilityPolicyPreparer prepares the DeleteImmutabilityPolicy request. +func (client blobClient) deleteImmutabilityPolicyPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("DELETE", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "immutabilityPolicies") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// deleteImmutabilityPolicyResponder handles the response to the DeleteImmutabilityPolicy request. +func (client blobClient) deleteImmutabilityPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobDeleteImmutabilityPolicyResponse{rawResponse: resp.Response()}, err +} + +// Download the Download operation reads or downloads a blob from the system, including its metadata and properties. +// You can also call Download to read a snapshot. +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present, +// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the +// timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified +// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// rangeGetContentMD5 is when set to true and specified together with the Range, the service returns the MD5 hash for +// the range, as long as the range is less than or equal to 4 MB in size. rangeGetContentCRC64 is when set to true and +// specified together with the Range, the service returns the CRC64 hash for the range, as long as the range is less +// than or equal to 4 MB in size. encryptionKey is optional. Specifies the encryption key to use to encrypt the data +// provided in the request. 
If not specified, encryption is performed with the root account encryption key. For more +// information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the +// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the +// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided +// if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. +func (client blobClient) Download(ctx context.Context, snapshot *string, versionID *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, rangeGetContentCRC64 *bool, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*downloadResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.downloadPreparer(snapshot, versionID, timeout, rangeParameter, leaseID, rangeGetContentMD5, rangeGetContentCRC64, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.downloadResponder}, req) + if err != nil { + return nil, err + } + return resp.(*downloadResponse), err +} + +// downloadPreparer prepares the Download request. 
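+//
+// Editor's sketch (not AutoRest-generated): for a ranged read of the first
+// mebibyte, the preparer below issues roughly:
+//
+//	GET <blob-url> HTTP/1.1
+//	x-ms-range: bytes=0-1048575
+//	x-ms-version: <ServiceVersion>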
+func (client blobClient) downloadPreparer(snapshot *string, versionID *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, rangeGetContentCRC64 *bool, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if rangeGetContentMD5 != nil { + req.Header.Set("x-ms-range-get-content-md5", strconv.FormatBool(*rangeGetContentMD5)) + } + if rangeGetContentCRC64 != nil { + req.Header.Set("x-ms-range-get-content-crc64", strconv.FormatBool(*rangeGetContentCRC64)) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// downloadResponder handles the response to the Download request. +func (client blobClient) downloadResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusPartialContent) + if resp == nil { + return nil, err + } + return &downloadResponse{rawResponse: resp.Response()}, err +} + +// GetAccountInfo returns the sku name and account kind +func (client blobClient) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) { + req, err := client.getAccountInfoPreparer() + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobGetAccountInfoResponse), err +} + +// getAccountInfoPreparer prepares the GetAccountInfo request. 
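+// The restype=account and comp=properties query pair is what routes this GET
+// to the account-info endpoint; as a purely illustrative sketch of the URL
+// shape (account, container, and blob names are placeholders):
+//
+//   GET https://<account>.blob.core.windows.net/<container>/<blob>?restype=account&comp=properties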
+func (client blobClient) getAccountInfoPreparer() (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("restype", "account") + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + return req, nil +} + +// getAccountInfoResponder handles the response to the GetAccountInfo request. +func (client blobClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobGetAccountInfoResponse{rawResponse: resp.Response()}, err +} + +// GetProperties the Get Properties operation returns all user-defined metadata, standard HTTP properties, and system +// properties for the blob. It does not return the content of the blob. +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present, +// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the +// timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the +// data provided in the request. If not specified, encryption is performed with the root account encryption key. For +// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the +// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the +// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided +// if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. 
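+//
+// Illustrative sketch only (not generated documentation): with every optional
+// argument left nil, the call reduces to a plain HEAD request against the blob
+// and the returned BlobGetPropertiesResponse simply wraps the raw headers:
+//
+//   props, err := client.GetProperties(ctx, nil, nil, nil, nil, nil, nil,
+//       EncryptionAlgorithmNone, nil, nil, nil, nil, nil, nil)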
+func (client blobClient) GetProperties(ctx context.Context, snapshot *string, versionID *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobGetPropertiesResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPropertiesPreparer(snapshot, versionID, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobGetPropertiesResponse), err +} + +// getPropertiesPreparer prepares the GetProperties request. +func (client blobClient) getPropertiesPreparer(snapshot *string, versionID *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("HEAD", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPropertiesResponder handles the response to the GetProperties request. 
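+// Like the other header-only responders in this file, it drains and closes the
+// response body even though no payload is expected, which lets the underlying
+// HTTP connection be reused.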
+func (client blobClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobGetPropertiesResponse{rawResponse: resp.Response()}, err +} + +// GetTags the Get Tags operation enables users to get the tags associated with a blob. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. snapshot is the +// snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more +// information on working with blob snapshots, see Creating +// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present, +// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. ifTags is specify a +// SQL where clause on blob tags to operate only on blobs with a matching value. leaseID is if specified, the operation +// only succeeds if the resource's lease is active and matches this ID. +func (client blobClient) GetTags(ctx context.Context, timeout *int32, requestID *string, snapshot *string, versionID *string, ifTags *string, leaseID *string) (*BlobTags, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getTagsPreparer(timeout, requestID, snapshot, versionID, ifTags, leaseID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getTagsResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobTags), err +} + +// getTagsPreparer prepares the GetTags request. +func (client blobClient) getTagsPreparer(timeout *int32, requestID *string, snapshot *string, versionID *string, ifTags *string, leaseID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } + params.Set("comp", "tags") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + return req, nil +} + +// getTagsResponder handles the response to the GetTags request. 
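+// Unlike the header-only responders, this one consumes the XML payload: it
+// reads the body, strips a leading byte-order mark if present, and unmarshals
+// the bytes into the BlobTags result before closing the body.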
+func (client blobClient) getTagsResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &BlobTags{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// Query the Query operation enables users to select/project on blob data by providing simple query expressions. +// +// // queryPreparer prepares the Query request. +// func (client blobClient) queryPreparer(snapshot *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *string, ifNoneMatch *string, ifTags *string, requestID *string) (pipeline.Request, error) { +// req, err := pipeline.NewRequest("POST", client.url, nil) +// if err != nil { +// return req, pipeline.NewError(err, "failed to create request") +// } +// params := req.URL.Query() +// if snapshot != nil && len(*snapshot) > 0 { +// params.Set("snapshot", *snapshot) +// } +// if timeout != nil { +// params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) +// } +// params.Set("comp", "query") +// req.URL.RawQuery = params.Encode() +// if leaseID != nil { +// req.Header.Set("x-ms-lease-id", *leaseID) +// } +// if encryptionKey != nil { +// req.Header.Set("x-ms-encryption-key", *encryptionKey) +// } +// if encryptionKeySha256 != nil { +// req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) +// } +// if encryptionAlgorithm != EncryptionAlgorithmNone { +// req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) +// } +// if ifModifiedSince != nil { +// req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) +// } +// if ifUnmodifiedSince != nil { +// req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) +// } +// if ifMatch != nil { +// req.Header.Set("If-Match", *ifMatch) +// } +// if ifNoneMatch != nil { +// req.Header.Set("If-None-Match", *ifNoneMatch) +// } +// if ifTags != nil { +// req.Header.Set("x-ms-if-tags", *ifTags) +// } +// req.Header.Set("x-ms-version", ServiceVersion) +// if requestID != nil { +// req.Header.Set("x-ms-client-request-id", *requestID) +// } +// b, err := xml.Marshal(queryRequest) +// if err != nil { +// return req, pipeline.NewError(err, "failed to marshal request body") +// } +// req.Header.Set("Content-Type", "application/xml") +// err = req.SetBody(bytes.NewReader(b)) +// if err != nil { +// return req, pipeline.NewError(err, "failed to set request body") +// } +// return req, nil +// } +// +// // queryResponder handles the response to the Query request. +// func (client blobClient) queryResponder(resp pipeline.Response) (pipeline.Response, error) { +// err := validateResponse(resp, http.StatusOK, http.StatusPartialContent) +// if resp == nil { +// return nil, err +// } +// return &QueryResponse{rawResponse: resp.Response()}, err +// } + +// ReleaseLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// leaseID is specifies the current lease ID on the resource. 
timeout is the timeout parameter is expressed in seconds. +// For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. +func (client blobClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobReleaseLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.releaseLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobReleaseLeaseResponse), err +} + +// releaseLeasePreparer prepares the ReleaseLease request. +func (client blobClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "release") + return req, nil +} + +// releaseLeaseResponder handles the response to the ReleaseLease request. 
+func (client blobClient) releaseLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobReleaseLeaseResponse{rawResponse: resp.Response()}, err +} + +// RenewLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds. +// For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. +func (client blobClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobRenewLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.renewLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobRenewLeaseResponse), err +} + +// renewLeasePreparer prepares the RenewLease request. 
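+// The lease preparers in this file are near-identical; the distinguishing
+// detail is the x-ms-lease-action header ("renew" here, "release" above),
+// layered on the shared comp=lease query parameter.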
+func (client blobClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "renew") + return req, nil +} + +// renewLeaseResponder handles the response to the RenewLease request. +func (client blobClient) renewLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobRenewLeaseResponse{rawResponse: resp.Response()}, err +} + +// SetExpiry sets the time a blob will expire and be deleted. +// +// expiryOptions is required. Indicates mode of the expiry time timeout is the timeout parameter is expressed in +// seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. expiresOn is the +// time to set the blob to expiry +func (client blobClient) SetExpiry(ctx context.Context, expiryOptions BlobExpiryOptionsType, timeout *int32, requestID *string, expiresOn *string) (*BlobSetExpiryResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setExpiryPreparer(expiryOptions, timeout, requestID, expiresOn) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setExpiryResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetExpiryResponse), err +} + +// setExpiryPreparer prepares the SetExpiry request. 
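+// A hedged sketch (BlobExpiryOptionsAbsolute is assumed from the generated
+// enum and the values are hypothetical): an absolute expiry pairs the mode
+// header with an RFC1123 timestamp:
+//
+//   when := time.Now().Add(24 * time.Hour).UTC().Format(http.TimeFormat)
+//   req, err := client.setExpiryPreparer(BlobExpiryOptionsAbsolute, nil, nil, &when)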
+func (client blobClient) setExpiryPreparer(expiryOptions BlobExpiryOptionsType, timeout *int32, requestID *string, expiresOn *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "expiry") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-expiry-option", string(expiryOptions)) + if expiresOn != nil { + req.Header.Set("x-ms-expiry-time", *expiresOn) + } + return req, nil +} + +// setExpiryResponder handles the response to the SetExpiry request. +func (client blobClient) setExpiryResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetExpiryResponse{rawResponse: resp.Response()}, err +} + +// SetHTTPHeaders the Set HTTP Headers operation sets system properties on the blob +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. blobCacheControl is optional. Sets the blob's cache control. If specified, +// this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's +// content type. If specified, this property is stored with the blob and returned with a read request. blobContentMD5 +// is optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual +// blocks were validated when each was uploaded. blobContentEncoding is optional. Sets the blob's content encoding. If +// specified, this property is stored with the blob and returned with a read request. blobContentLanguage is optional. +// Set the blob's content language. If specified, this property is stored with the blob and returned with a read +// request. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL +// where clause on blob tags to operate only on blobs with a matching value. blobContentDisposition is optional. Sets +// the blob's Content-Disposition header. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. 
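+//
+// Illustrative sketch only (the value is hypothetical): arguments left nil are
+// simply omitted from the outgoing request, so a single property can be set on
+// its own at the wire level:
+//
+//   ct := "application/json"
+//   resp, err := client.SetHTTPHeaders(ctx, nil, nil, &ct, nil, nil, nil, nil,
+//       nil, nil, nil, nil, nil, nil)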
+func (client blobClient) SetHTTPHeaders(ctx context.Context, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentMD5 []byte, blobContentEncoding *string, blobContentLanguage *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobContentDisposition *string, requestID *string) (*BlobSetHTTPHeadersResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setHTTPHeadersPreparer(timeout, blobCacheControl, blobContentType, blobContentMD5, blobContentEncoding, blobContentLanguage, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobContentDisposition, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setHTTPHeadersResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetHTTPHeadersResponse), err +} + +// setHTTPHeadersPreparer prepares the SetHTTPHeaders request. +func (client blobClient) setHTTPHeadersPreparer(timeout *int32, blobCacheControl *string, blobContentType *string, blobContentMD5 []byte, blobContentEncoding *string, blobContentLanguage *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobContentDisposition *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// setHTTPHeadersResponder handles the response to the SetHTTPHeaders request. 
+func (client blobClient) setHTTPHeadersResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetHTTPHeadersResponse{rawResponse: resp.Response()}, err +} + +// SetImmutabilityPolicy the Set Immutability Policy operation sets the immutability policy on the blob +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. ifUnmodifiedSince +// is specify this header value to operate only on a blob if it has not been modified since the specified date/time. +// immutabilityPolicyExpiry is specifies the date time when the blobs immutability policy is set to expire. +// immutabilityPolicyMode is specifies the immutability policy mode to set on the blob. +func (client blobClient) SetImmutabilityPolicy(ctx context.Context, timeout *int32, requestID *string, ifUnmodifiedSince *time.Time, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType) (*BlobSetImmutabilityPolicyResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setImmutabilityPolicyPreparer(timeout, requestID, ifUnmodifiedSince, immutabilityPolicyExpiry, immutabilityPolicyMode) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setImmutabilityPolicyResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetImmutabilityPolicyResponse), err +} + +// setImmutabilityPolicyPreparer prepares the SetImmutabilityPolicy request. +func (client blobClient) setImmutabilityPolicyPreparer(timeout *int32, requestID *string, ifUnmodifiedSince *time.Time, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "immutabilityPolicies") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if immutabilityPolicyExpiry != nil { + req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) + } + if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { + req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) + } + return req, nil +} + +// setImmutabilityPolicyResponder handles the response to the SetImmutabilityPolicy request. 
+func (client blobClient) setImmutabilityPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetImmutabilityPolicyResponse{rawResponse: resp.Response()}, err +} + +// SetLegalHold the Set Legal Hold operation sets a legal hold on the blob. +// +// legalHold is specified if a legal hold should be set on the blob. timeout is the timeout parameter is expressed in +// seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client blobClient) SetLegalHold(ctx context.Context, legalHold bool, timeout *int32, requestID *string) (*BlobSetLegalHoldResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setLegalHoldPreparer(legalHold, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setLegalHoldResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetLegalHoldResponse), err +} + +// setLegalHoldPreparer prepares the SetLegalHold request. +func (client blobClient) setLegalHoldPreparer(legalHold bool, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "legalhold") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-legal-hold", strconv.FormatBool(legalHold)) + return req, nil +} + +// setLegalHoldResponder handles the response to the SetLegalHold request. +func (client blobClient) setLegalHoldResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetLegalHoldResponse{rawResponse: resp.Response()}, err +} + +// SetMetadata the Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more +// name-value pairs +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. 
See Naming and Referencing +// Containers, Blobs, and Metadata for more information. leaseID is if specified, the operation only succeeds if the +// resource's lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to +// encrypt the data provided in the request. If not specified, encryption is performed with the root account encryption +// key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 +// hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. +// encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is +// "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version +// 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the +// request. If not specified, encryption is performed with the default account encryption scope. For more information, +// see Encryption at Rest for Azure Storage Services. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. +func (client blobClient) SetMetadata(ctx context.Context, timeout *int32, metadata map[string]string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobSetMetadataResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setMetadataPreparer(timeout, metadata, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setMetadataResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetMetadataResponse), err +} + +// setMetadataPreparer prepares the SetMetadata request. 
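+// Each pair is emitted as an x-ms-meta-<name> header, so metadata names must
+// be valid HTTP header tokens; a hypothetical example, for illustration only:
+//
+//   md := map[string]string{"project": "vfs", "owner": "caching"}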
+func (client blobClient) setMetadataPreparer(timeout *int32, metadata map[string]string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "metadata") + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// setMetadataResponder handles the response to the SetMetadata request. +func (client blobClient) setMetadataResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetMetadataResponse{rawResponse: resp.Response()}, err +} + +// SetTags the Set Tags operation enables users to set tags on a blob. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. versionID is the version id parameter is an opaque DateTime value that, +// when present, specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. +// transactionalContentMD5 is specify the transactional md5 for the body, to be validated by the service. +// transactionalContentCrc64 is specify the transactional crc64 for the body, to be validated by the service. requestID +// is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when +// storage analytics logging is enabled. ifTags is specify a SQL where clause on blob tags to operate only on blobs +// with a matching value. leaseID is if specified, the operation only succeeds if the resource's lease is active and +// matches this ID. 
tags is blob tags +func (client blobClient) SetTags(ctx context.Context, timeout *int32, versionID *string, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, requestID *string, ifTags *string, leaseID *string, tags *BlobTags) (*BlobSetTagsResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setTagsPreparer(timeout, versionID, transactionalContentMD5, transactionalContentCrc64, requestID, ifTags, leaseID, tags) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setTagsResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetTagsResponse), err +} + +// setTagsPreparer prepares the SetTags request. +func (client blobClient) setTagsPreparer(timeout *int32, versionID *string, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, requestID *string, ifTags *string, leaseID *string, tags *BlobTags) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } + params.Set("comp", "tags") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if transactionalContentCrc64 != nil { + req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) + } + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + b, err := xml.Marshal(tags) + if err != nil { + return req, pipeline.NewError(err, "failed to marshal request body") + } + req.Header.Set("Content-Type", "application/xml") + err = req.SetBody(bytes.NewReader(b)) + if err != nil { + return req, pipeline.NewError(err, "failed to set request body") + } + return req, nil +} + +// setTagsResponder handles the response to the SetTags request. +func (client blobClient) setTagsResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusNoContent) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetTagsResponse{rawResponse: resp.Response()}, err +} + +// SetTier the Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage +// account and on a block blob in a blob storage account (locally redundant storage only). A premium page blob's tier +// determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive +// storage type. This operation does not update the blob's ETag. +// +// tier is indicates the tier to be set on the blob. snapshot is the snapshot parameter is an opaque DateTime value +// that, when present, specifies the blob snapshot to retrieve. 
For more information on working with blob snapshots, +// see Creating +// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present, +// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the +// timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. rehydratePriority is optional: Indicates the priority with which to +// rehydrate an archived blob. requestID is provides a client-generated, opaque value with a 1 KB character limit that +// is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if specified, the operation +// only succeeds if the resource's lease is active and matches this ID. ifTags is specify a SQL where clause on blob +// tags to operate only on blobs with a matching value. +func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, snapshot *string, versionID *string, timeout *int32, rehydratePriority RehydratePriorityType, requestID *string, leaseID *string, ifTags *string) (*BlobSetTierResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setTierPreparer(tier, snapshot, versionID, timeout, rehydratePriority, requestID, leaseID, ifTags) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setTierResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetTierResponse), err +} + +// setTierPreparer prepares the SetTier request. +func (client blobClient) setTierPreparer(tier AccessTierType, snapshot *string, versionID *string, timeout *int32, rehydratePriority RehydratePriorityType, requestID *string, leaseID *string, ifTags *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "tier") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-access-tier", string(tier)) + if rehydratePriority != RehydratePriorityNone { + req.Header.Set("x-ms-rehydrate-priority", string(rehydratePriority)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + return req, nil +} + +// setTierResponder handles the response to the SetTier request. 
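+// Both 200 and 202 are accepted here: the service replies 200 when the tier
+// change takes effect immediately and 202 when it has been accepted for
+// asynchronous completion (rehydration from Archive, for example).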
+func (client blobClient) setTierResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetTierResponse{rawResponse: resp.Response()}, err +} + +// StartCopyFromURL the Start Copy From URL operation copies a blob or an internet resource to a new blob. +// +// copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that +// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob +// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is +// expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. tier is optional. Indicates the tier to be set on the blob. +// rehydratePriority is optional: Indicates the priority with which to rehydrate an archived blob. +// sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified since the +// specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not +// been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a +// matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. +// sourceIfTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL +// where clause on blob tags to operate only on blobs with a matching value. leaseID is if specified, the operation +// only succeeds if the resource's lease is active and matches this ID. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. blobTagsString is optional. Used to set blob tags in various blob operations. sealBlob is overrides the +// sealed state of the destination blob. Service version 2019-12-12 and newer. immutabilityPolicyExpiry is specifies +// the date time when the blobs immutability policy is set to expire. immutabilityPolicyMode is specifies the +// immutability policy mode to set on the blob. legalHold is specified if a legal hold should be set on the blob. 
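+//
+// Illustrative sketch only (the source URL is hypothetical; as noted above, it
+// must be public or carry a shared access signature). The "None" enum values
+// leave their corresponding headers unset:
+//
+//   src := "https://example.blob.core.windows.net/src/source-blob"
+//   resp, err := client.StartCopyFromURL(ctx, src, nil, nil, AccessTierNone,
+//       RehydratePriorityNone, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+//       nil, nil, nil, nil, nil, BlobImmutabilityPolicyModeNone, nil)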
+func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, rehydratePriority RehydratePriorityType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, sourceIfTags *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, blobTagsString *string, sealBlob *bool, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (*BlobStartCopyFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, tier, rehydratePriority, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, sourceIfTags, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, leaseID, requestID, blobTagsString, sealBlob, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.startCopyFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobStartCopyFromURLResponse), err +} + +// startCopyFromURLPreparer prepares the StartCopyFromURL request. +func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, rehydratePriority RehydratePriorityType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, sourceIfTags *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, blobTagsString *string, sealBlob *bool, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if tier != AccessTierNone { + req.Header.Set("x-ms-access-tier", string(tier)) + } + if rehydratePriority != RehydratePriorityNone { + req.Header.Set("x-ms-rehydrate-priority", string(rehydratePriority)) + } + if sourceIfModifiedSince != nil { + req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfUnmodifiedSince != nil { + req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfMatch != nil { + req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) + } + if sourceIfNoneMatch != nil { + req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) + } + if sourceIfTags != nil { + req.Header.Set("x-ms-source-if-tags", *sourceIfTags) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", 
(*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-copy-source", copySource) + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } + if sealBlob != nil { + req.Header.Set("x-ms-seal-blob", strconv.FormatBool(*sealBlob)) + } + if immutabilityPolicyExpiry != nil { + req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) + } + if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { + req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) + } + if legalHold != nil { + req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) + } + return req, nil +} + +// startCopyFromURLResponder handles the response to the StartCopyFromURL request. +func (client blobClient) startCopyFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobStartCopyFromURLResponse{rawResponse: resp.Response()}, err +} + +// Undelete undelete a blob that was previously soft deleted +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client blobClient) Undelete(ctx context.Context, timeout *int32, requestID *string) (*BlobUndeleteResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.undeletePreparer(timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.undeleteResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobUndeleteResponse), err +} + +// undeletePreparer prepares the Undelete request. +func (client blobClient) undeletePreparer(timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "undelete") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// undeleteResponder handles the response to the Undelete request. 
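+// Editor's illustration (not AutoRest output): the Undelete round trip driven
+// by this preparer/responder pair takes only the optional timeout and request
+// ID, so a minimal in-package call, for a hypothetical blobClient bc, is simply
+//
+//	_, err := bc.Undelete(ctx, nil, nil)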
+func (client blobClient) undeleteResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobUndeleteResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go new file mode 100644 index 000000000..31067ed64 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go @@ -0,0 +1,848 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" +) + +// blockBlobClient is the client for the BlockBlob methods of the Azblob service. +type blockBlobClient struct { + managementClient +} + +// newBlockBlobClient creates an instance of the blockBlobClient client. +func newBlockBlobClient(url url.URL, p pipeline.Pipeline) blockBlobClient { + return blockBlobClient{newManagementClient(url, p)} +} + +// CommitBlockList the Commit Block List operation writes a blob by specifying the list of block IDs that make up the +// blob. In order to be written as part of a blob, a block must have been successfully written to the server in a prior +// Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed, +// then committing the new and existing blocks together. You can do this by specifying whether to commit a block from +// the committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the +// block, whichever list it may belong to. +// +// blocks is blob Blocks. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. blobCacheControl is optional. Sets the blob's cache control. If specified, +// this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's +// content type. If specified, this property is stored with the blob and returned with a read request. +// blobContentEncoding is optional. Sets the blob's content encoding. If specified, this property is stored with the +// blob and returned with a read request. blobContentLanguage is optional. Set the blob's content language. If +// specified, this property is stored with the blob and returned with a read request. blobContentMD5 is optional. An +// MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks were +// validated when each was uploaded. transactionalContentMD5 is specify the transactional md5 for the body, to be +// validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to be validated +// by the service. metadata is optional. Specifies a user-defined name-value pair associated with the blob. If no +// name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination +// blob. 
If one or more name-value pairs are specified, the destination blob is created with the specified metadata, +// and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names +// must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for +// more information. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches +// this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. +// Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption is +// performed with the root account encryption key. For more information, see Encryption at Rest for Azure Storage +// Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the +// x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key +// hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is +// provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to +// use to encrypt the data provided in the request. If not specified, encryption is performed with the default account +// encryption scope. For more information, see Encryption at Rest for Azure Storage Services. tier is optional. +// Indicates the tier to be set on the blob. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob +// operations. immutabilityPolicyExpiry is specifies the date time when the blobs immutability policy is set to expire. +// immutabilityPolicyMode is specifies the immutability policy mode to set on the blob. legalHold is specified if a +// legal hold should be set on the blob. 
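+//
+// Editor's note: an illustrative sketch, not AutoRest output. Assuming two
+// previously staged block IDs id1 and id2 and a blockBlobClient bbc (all three
+// names hypothetical), a minimal commit passes zero values everywhere else:
+//
+//	blocks := BlockLookupList{Latest: []string{id1, id2}}
+//	_, err := bbc.CommitBlockList(ctx, blocks, nil,
+//		nil, nil, nil, nil, nil, // blob cache-control/content-type/encoding/language/MD5
+//		nil, nil, nil, nil, nil, // transactional MD5/CRC64, metadata, leaseID, disposition
+//		nil, nil, EncryptionAlgorithmNone, nil, AccessTierNone,
+//		nil, nil, nil, nil, nil, // if-* access conditions
+//		nil, nil, nil, BlobImmutabilityPolicyModeNone, nil)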
+func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (*BlockBlobCommitBlockListResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, transactionalContentMD5, transactionalContentCrc64, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.commitBlockListResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockBlobCommitBlockListResponse), err +} + +// commitBlockListPreparer prepares the CommitBlockList request. 
+func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "blocklist") + req.URL.RawQuery = params.Encode() + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if transactionalContentCrc64 != nil { + req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if tier != AccessTierNone { + req.Header.Set("x-ms-access-tier", string(tier)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } + if immutabilityPolicyExpiry != nil { + req.Header.Set("x-ms-immutability-policy-until-date", 
(*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) + } + if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { + req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) + } + if legalHold != nil { + req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) + } + b, err := xml.Marshal(blocks) + if err != nil { + return req, pipeline.NewError(err, "failed to marshal request body") + } + req.Header.Set("Content-Type", "application/xml") + err = req.SetBody(bytes.NewReader(b)) + if err != nil { + return req, pipeline.NewError(err, "failed to set request body") + } + return req, nil +} + +// commitBlockListResponder handles the response to the CommitBlockList request. +func (client blockBlobClient) commitBlockListResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlockBlobCommitBlockListResponse{rawResponse: resp.Response()}, err +} + +// GetBlockList the Get Block List operation retrieves the list of blocks that have been uploaded as part of a block +// blob +// +// listType is specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists +// together. snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob +// snapshot to retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. ifTags is specify a SQL where clause on blob tags to operate only on blobs with +// a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is +// recorded in the analytics logs when storage analytics logging is enabled. +func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, ifTags *string, requestID *string) (*BlockList, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getBlockListPreparer(listType, snapshot, timeout, leaseID, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getBlockListResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockList), err +} + +// getBlockListPreparer prepares the GetBlockList request. 
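+// Editor's aside (not AutoRest output): this preparer/responder pair backs
+// calls of the following form, where bbc is a hypothetical blockBlobClient:
+//
+//	bl, err := bbc.GetBlockList(ctx, BlockListTypeAll, nil, nil, nil, nil, nil)
+//	if err == nil {
+//		_ = len(bl.CommittedBlocks) // each Block entry reports its Name and Size
+//	}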
+func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snapshot *string, timeout *int32, leaseID *string, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + params.Set("blocklisttype", string(listType)) + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "blocklist") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getBlockListResponder handles the response to the GetBlockList request. +func (client blockBlobClient) getBlockListResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &BlockList{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// PutBlobFromURL the Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from +// a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are not supported with +// Put Blob from URL; the content of an existing blob is overwritten with the content of the new blob. To perform +// partial updates to a block blob’s contents using a source URL, use the Put Block from URL API in conjunction with +// Put Block List. +// +// contentLength is the length of the request. copySource is specifies the name of the source page blob snapshot. This +// value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it +// would appear in a request URI. The source blob must either be public or must be authenticated via a shared access +// signature. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to +// be validated by the service. blobContentType is optional. Sets the blob's content type. If specified, this property +// is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the blob's content +// encoding. If specified, this property is stored with the blob and returned with a read request. blobContentLanguage +// is optional. Set the blob's content language. If specified, this property is stored with the blob and returned with +// a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this hash is not validated, +// as the hashes for the individual blocks were validated when each was uploaded. blobCacheControl is optional. Sets +// the blob's cache control. 
If specified, this property is stored with the blob and returned with a read request. +// metadata is optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are +// specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more +// name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not +// copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the +// naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. +// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// blobContentDisposition is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. Specifies +// the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed +// with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services. +// encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key +// header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the +// only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is +// optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data +// provided in the request. If not specified, encryption is performed with the default account encryption scope. For +// more information, see Encryption at Rest for Azure Storage Services. tier is optional. Indicates the tier to be set +// on the blob. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since +// the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been +// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching +// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a +// SQL where clause on blob tags to operate only on blobs with a matching value. sourceIfModifiedSince is specify this +// header value to operate only on a blob if it has been modified since the specified date/time. +// sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the +// specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a matching value. +// sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. sourceIfTags is +// specify a SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. sourceContentMD5 is specify the md5 calculated for the range of bytes that must be +// read from the copy source. blobTagsString is optional. Used to set blob tags in various blob operations. +// copySourceBlobProperties is optional, default is true. Indicates if properties from the source blob should be +// copied. copySourceAuthorization is only Bearer type is supported. 
Credentials should be a valid OAuth access token +// to copy source. +func (client blockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, sourceIfTags *string, requestID *string, sourceContentMD5 []byte, blobTagsString *string, copySourceBlobProperties *bool, copySourceAuthorization *string) (*BlockBlobPutBlobFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.putBlobFromURLPreparer(contentLength, copySource, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, sourceIfTags, requestID, sourceContentMD5, blobTagsString, copySourceBlobProperties, copySourceAuthorization) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.putBlobFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockBlobPutBlobFromURLResponse), err +} + +// putBlobFromURLPreparer prepares the PutBlobFromURL request. 
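+// Editor's sketch (not AutoRest output): contentLength is fixed at 0 because
+// the request carries no body; the data is pulled from srcURL (a hypothetical
+// name, as is bbc). A minimal call then reads:
+//
+//	_, err := bbc.PutBlobFromURL(ctx, 0, srcURL, nil, nil,
+//		nil, nil, nil, nil, nil, // blob content headers
+//		nil, nil, nil, nil, nil, EncryptionAlgorithmNone, nil, AccessTierNone,
+//		nil, nil, nil, nil, nil, // destination if-* conditions
+//		nil, nil, nil, nil, nil, // source if-* conditions
+//		nil, nil, nil, nil, nil) // requestID, source MD5, tags, copy-props, auth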
+func (client blockBlobClient) putBlobFromURLPreparer(contentLength int64, copySource string, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, sourceIfTags *string, requestID *string, sourceContentMD5 []byte, blobTagsString *string, copySourceBlobProperties *bool, copySourceAuthorization *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if tier != AccessTierNone { + req.Header.Set("x-ms-access-tier", string(tier)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + if sourceIfModifiedSince != nil { + req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfUnmodifiedSince != nil { + req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfMatch != nil { + 
req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) + } + if sourceIfNoneMatch != nil { + req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) + } + if sourceIfTags != nil { + req.Header.Set("x-ms-source-if-tags", *sourceIfTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if sourceContentMD5 != nil { + req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) + } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } + req.Header.Set("x-ms-copy-source", copySource) + if copySourceBlobProperties != nil { + req.Header.Set("x-ms-copy-source-blob-properties", strconv.FormatBool(*copySourceBlobProperties)) + } + if copySourceAuthorization != nil { + req.Header.Set("x-ms-copy-source-authorization", *copySourceAuthorization) + } + req.Header.Set("x-ms-blob-type", "BlockBlob") + return req, nil +} + +// putBlobFromURLResponder handles the response to the PutBlobFromURL request. +func (client blockBlobClient) putBlobFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlockBlobPutBlobFromURLResponse{rawResponse: resp.Response()}, err +} + +// StageBlock the Stage Block operation creates a new block to be committed as part of a blob +// +// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or +// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the +// same size for each block. contentLength is the length of the request. body is initial data body will be closed upon +// successful return. Callers should ensure closure when receiving an error.transactionalContentMD5 is specify the +// transactional md5 for the body, to be validated by the service. transactionalContentCrc64 is specify the +// transactional crc64 for the body, to be validated by the service. timeout is the timeout parameter is expressed in +// seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the +// data provided in the request. If not specified, encryption is performed with the root account encryption key. For +// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the +// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the +// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided +// if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies +// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is +// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage +// Services. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. 
+func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, requestID *string) (*BlockBlobStageBlockResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockBlobStageBlockResponse), err +} + +// stageBlockPreparer prepares the StageBlock request. +func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("blockid", blockID) + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "block") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if transactionalContentCrc64 != nil { + req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// stageBlockResponder handles the response to the StageBlock request. 
+func (client blockBlobClient) stageBlockResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlockBlobStageBlockResponse{rawResponse: resp.Response()}, err +} + +// StageBlockFromURL the Stage Block operation creates a new block to be committed as part of a blob where the contents +// are read from a URL. +// +// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or +// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the +// same size for each block. contentLength is the length of the request. sourceURL is specify a URL to the copy source. +// sourceRange is bytes of source data in the specified range. sourceContentMD5 is specify the md5 calculated for the +// range of bytes that must be read from the copy source. sourceContentcrc64 is specify the crc64 calculated for the +// range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in seconds. For +// more information, see Setting +// Timeouts for Blob Service Operations. encryptionKey is optional. Specifies the encryption key to use to encrypt +// the data provided in the request. If not specified, encryption is performed with the root account encryption key. +// For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of +// the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is +// the algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be +// provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. +// Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified, +// encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for +// Azure Storage Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and +// matches this ID. sourceIfModifiedSince is specify this header value to operate only on a blob if it has been +// modified since the specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate +// only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded +// in the analytics logs when storage analytics logging is enabled. copySourceAuthorization is only Bearer type is +// supported. Credentials should be a valid OAuth access token to copy source. 
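+//
+// Editor's note: illustrative only, not AutoRest output; id, srcURL, and bbc
+// are hypothetical. contentLength is 0 because the request itself has no body;
+// the service reads the block's bytes directly from the source URL:
+//
+//	rng := "bytes=0-4194303" // stage the first 4 MiB of the source
+//	_, err := bbc.StageBlockFromURL(ctx, id, 0, srcURL, &rng,
+//		nil, nil, nil, nil, nil, EncryptionAlgorithmNone, nil, nil,
+//		nil, nil, nil, nil, nil, nil)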
+func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (*BlockBlobStageBlockFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID, copySourceAuthorization) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockBlobStageBlockFromURLResponse), err +} + +// stageBlockFromURLPreparer prepares the StageBlockFromURL request. +func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("blockid", blockID) + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "block") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + req.Header.Set("x-ms-copy-source", sourceURL) + if sourceRange != nil { + req.Header.Set("x-ms-source-range", *sourceRange) + } + if sourceContentMD5 != nil { + req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) + } + if sourceContentcrc64 != nil { + req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64)) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if sourceIfModifiedSince != nil { + req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfUnmodifiedSince != nil { + 
req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfMatch != nil { + req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) + } + if sourceIfNoneMatch != nil { + req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if copySourceAuthorization != nil { + req.Header.Set("x-ms-copy-source-authorization", *copySourceAuthorization) + } + return req, nil +} + +// stageBlockFromURLResponder handles the response to the StageBlockFromURL request. +func (client blockBlobClient) stageBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlockBlobStageBlockFromURLResponse{rawResponse: resp.Response()}, err +} + +// Upload the Upload Block Blob operation updates the content of an existing block blob. Updating an existing block +// blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; the content of +// the existing blob is overwritten with the content of the new blob. To perform a partial update of the content of a +// block blob, use the Put Block List operation. +// +// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an +// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to +// be validated by the service. blobContentType is optional. Sets the blob's content type. If specified, this property +// is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the blob's content +// encoding. If specified, this property is stored with the blob and returned with a read request. blobContentLanguage +// is optional. Set the blob's content language. If specified, this property is stored with the blob and returned with +// a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this hash is not validated, +// as the hashes for the individual blocks were validated when each was uploaded. blobCacheControl is optional. Sets +// the blob's cache control. If specified, this property is stored with the blob and returned with a read request. +// metadata is optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are +// specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more +// name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not +// copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the +// naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. +// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// blobContentDisposition is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. 
Specifies +// the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed +// with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services. +// encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key +// header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the +// only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is +// optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data +// provided in the request. If not specified, encryption is performed with the default account encryption scope. For +// more information, see Encryption at Rest for Azure Storage Services. tier is optional. Indicates the tier to be set +// on the blob. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since +// the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been +// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching +// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a +// SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob operations. +// immutabilityPolicyExpiry is specifies the date time when the blobs immutability policy is set to expire. +// immutabilityPolicyMode is specifies the immutability policy mode to set on the blob. legalHold is specified if a +// legal hold should be set on the blob. 
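+//
+// Editor's note: an illustrative sketch, not AutoRest output; bbc is a
+// hypothetical blockBlobClient. Only the content type is set here; every other
+// optional parameter stays at its zero value:
+//
+//	payload := []byte("hello")
+//	ct := "text/plain"
+//	_, err := bbc.Upload(ctx, bytes.NewReader(payload), int64(len(payload)), nil, nil,
+//		&ct, nil, nil, nil, nil, // remaining blob content headers unset
+//		nil, nil, nil, nil, nil, EncryptionAlgorithmNone, nil, AccessTierNone,
+//		nil, nil, nil, nil, nil, // if-* access conditions
+//		nil, nil, nil, BlobImmutabilityPolicyModeNone, nil)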
+func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (*BlockBlobUploadResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.uploadPreparer(body, contentLength, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockBlobUploadResponse), err +} + +// uploadPreparer prepares the Upload request. 
+func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if tier != AccessTierNone { + req.Header.Set("x-ms-access-tier", string(tier)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } + if immutabilityPolicyExpiry != nil { + req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) + } + if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { + 
req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) + } + if legalHold != nil { + req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) + } + req.Header.Set("x-ms-blob-type", "BlockBlob") + return req, nil +} + +// uploadResponder handles the response to the Upload request. +func (client blockBlobClient) uploadResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlockBlobUploadResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go new file mode 100644 index 000000000..0db347e34 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go @@ -0,0 +1,38 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/azure-pipeline-go/pipeline" + "net/url" +) + +const ( + // ServiceVersion specifies the version of the operations used in this package. + ServiceVersion = "2020-10-02" +) + +// managementClient is the base client for Azblob. +type managementClient struct { + url url.URL + p pipeline.Pipeline +} + +// newManagementClient creates an instance of the managementClient client. +func newManagementClient(url url.URL, p pipeline.Pipeline) managementClient { + return managementClient{ + url: url, + p: p, + } +} + +// URL returns a copy of the URL for this client. +func (mc managementClient) URL() url.URL { + return mc.url +} + +// Pipeline returns the pipeline for this client. +func (mc managementClient) Pipeline() pipeline.Pipeline { + return mc.p +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go new file mode 100644 index 000000000..2e2f176e5 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go @@ -0,0 +1,1232 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "context" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" +) + +// containerClient is the client for the Container methods of the Azblob service. +type containerClient struct { + managementClient +} + +// newContainerClient creates an instance of the containerClient client. +func newContainerClient(url url.URL, p pipeline.Pipeline) containerClient { + return containerClient{newManagementClient(url, p)} +} + +// AcquireLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be +// 15 to 60 seconds, or can be infinite +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. duration is specifies the duration of the lease, in seconds, or negative +// one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration +// cannot be changed using renew or change. 
proposedLeaseID is proposed lease ID, in a GUID string format. The Blob +// service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor +// (String) for a list of valid GUID string formats. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. +func (client containerClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerAcquireLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.acquireLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerAcquireLeaseResponse), err +} + +// acquireLeasePreparer prepares the AcquireLease request. +func (client containerClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if duration != nil { + req.Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*duration), 10)) + } + if proposedLeaseID != nil { + req.Header.Set("x-ms-proposed-lease-id", *proposedLeaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "acquire") + return req, nil +} + +// acquireLeaseResponder handles the response to the AcquireLease request. +func (client containerClient) acquireLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerAcquireLeaseResponse{rawResponse: resp.Response()}, err +} + +// BreakLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// +// timeout is the timeout parameter is expressed in seconds. 
For more information, see Setting +// Timeouts for Blob Service Operations. breakPeriod is for a break operation, proposed duration the lease should +// continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the +// time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available +// before the break period has expired, but the lease may be held for longer than the break period. If this header does +// not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an +// infinite lease breaks immediately. ifModifiedSince is specify this header value to operate only on a blob if it has +// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. requestID is provides a client-generated, opaque +// value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerBreakLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.breakLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerBreakLeaseResponse), err +} + +// breakLeasePreparer prepares the BreakLease request. +func (client containerClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if breakPeriod != nil { + req.Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*breakPeriod), 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "break") + return req, nil +} + +// breakLeaseResponder handles the response to the BreakLease request. 
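+// Every generated operation in this file follows the same three-step pattern
+// visible above: the exported method runs parameter validation, the *Preparer
+// builds a pipeline.Request (query parameters, x-ms-* headers, x-ms-version),
+// and the *Responder is handed the raw response via the responderPolicyFactory
+// passed to Pipeline().Do. Responders for body-less operations drain the body
+// into ioutil.Discard before closing it, which lets net/http reuse the
+// underlying connection.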
+func (client containerClient) breakLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerBreakLeaseResponse{rawResponse: resp.Response()}, err +} + +// ChangeLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be +// 15 to 60 seconds, or can be infinite +// +// leaseID is specifies the current lease ID on the resource. proposedLeaseID is proposed lease ID, in a GUID string +// format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See +// Guid Constructor (String) for a list of valid GUID string formats. timeout is the timeout parameter is expressed in +// seconds. For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. +func (client containerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerChangeLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.changeLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerChangeLeaseResponse), err +} + +// changeLeasePreparer prepares the ChangeLease request. +func (client containerClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + req.Header.Set("x-ms-proposed-lease-id", proposedLeaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "change") + return req, nil +} + +// changeLeaseResponder handles the response to the ChangeLease request. 
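+// The five lease verbs (acquire, renew, change, release, break) drive a single
+// server-side state machine. A minimal lifecycle sketch against the
+// ContainerURL wrapper that fronts this client in url_container.go
+// (containerURL, ctx and error handling are assumed to come from the caller;
+// the proposed lease ID must be a valid GUID string, and duration is -1 for
+// infinite or 15 to 60 seconds):
+//
+//	acq, err := containerURL.AcquireLease(ctx, "730a4dd3-508a-4a28-a34c-68ddbbc54f4d", 60, azblob.ModifiedAccessConditions{})
+//	if err != nil {
+//		return err
+//	}
+//	leaseID := acq.LeaseID()
+//	// ... work on the container while the lease is held ...
+//	_, err = containerURL.ReleaseLease(ctx, leaseID, azblob.ModifiedAccessConditions{})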
+func (client containerClient) changeLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerChangeLeaseResponse{rawResponse: resp.Response()}, err +} + +// Create creates a new container under the specified account. If the container with the same name already exists, the +// operation fails +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. access is specifies whether data in the container may be +// accessed publicly and the level of access requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +// defaultEncryptionScope is optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on +// the container and use for all future writes. preventEncryptionScopeOverride is optional. Version 2019-07-07 and +// newer. If true, prevents any request from specifying a different encryption scope than the scope set on the +// container. +func (client containerClient) Create(ctx context.Context, timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string, defaultEncryptionScope *string, preventEncryptionScopeOverride *bool) (*ContainerCreateResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.createPreparer(timeout, metadata, access, requestID, defaultEncryptionScope, preventEncryptionScopeOverride) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerCreateResponse), err +} + +// createPreparer prepares the Create request. 
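+// End to end, callers normally reach Create through the public ContainerURL
+// wrapper rather than this generated client. A minimal sketch, assuming a
+// storage account name and key (placeholders below) and with error handling
+// elided:
+//
+//	credential, err := azblob.NewSharedKeyCredential("myaccount", "<base64-key>")
+//	if err != nil {
+//		return err
+//	}
+//	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
+//	u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer")
+//	containerURL := azblob.NewContainerURL(*u, p)
+//	_, err = containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)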
+func (client containerClient) createPreparer(timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string, defaultEncryptionScope *string, preventEncryptionScopeOverride *bool) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if access != PublicAccessNone { + req.Header.Set("x-ms-blob-public-access", string(access)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if defaultEncryptionScope != nil { + req.Header.Set("x-ms-default-encryption-scope", *defaultEncryptionScope) + } + if preventEncryptionScopeOverride != nil { + req.Header.Set("x-ms-deny-encryption-scope-override", strconv.FormatBool(*preventEncryptionScopeOverride)) + } + return req, nil +} + +// createResponder handles the response to the Create request. +func (client containerClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerCreateResponse{rawResponse: resp.Response()}, err +} + +// Delete operation marks the specified container for deletion. The container and any blobs contained within it are +// later deleted during garbage collection +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it +// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. requestID is provides a client-generated, opaque +// value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) Delete(ctx context.Context, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerDeleteResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.deletePreparer(timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerDeleteResponse), err +} + +// deletePreparer prepares the Delete request. 
+func (client containerClient) deletePreparer(timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("DELETE", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// deleteResponder handles the response to the Delete request. +func (client containerClient) deleteResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerDeleteResponse{rawResponse: resp.Response()}, err +} + +// GetAccessPolicy gets the permissions for the specified container. The permissions indicate whether container data +// may be accessed publicly. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) GetAccessPolicy(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*SignedIdentifiers, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getAccessPolicyPreparer(timeout, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccessPolicyResponder}, req) + if err != nil { + return nil, err + } + return resp.(*SignedIdentifiers), err +} + +// getAccessPolicyPreparer prepares the GetAccessPolicy request. 
+func (client containerClient) getAccessPolicyPreparer(timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "acl") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getAccessPolicyResponder handles the response to the GetAccessPolicy request. +func (client containerClient) getAccessPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &SignedIdentifiers{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// GetAccountInfo returns the sku name and account kind +func (client containerClient) GetAccountInfo(ctx context.Context) (*ContainerGetAccountInfoResponse, error) { + req, err := client.getAccountInfoPreparer() + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerGetAccountInfoResponse), err +} + +// getAccountInfoPreparer prepares the GetAccountInfo request. +func (client containerClient) getAccountInfoPreparer() (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("restype", "account") + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + return req, nil +} + +// getAccountInfoResponder handles the response to the GetAccountInfo request. +func (client containerClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerGetAccountInfoResponse{rawResponse: resp.Response()}, err +} + +// GetProperties returns all user-defined metadata and system properties for the specified container. The data returned +// does not include the container's list of blobs +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+func (client containerClient) GetProperties(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*ContainerGetPropertiesResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPropertiesPreparer(timeout, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerGetPropertiesResponse), err +} + +// getPropertiesPreparer prepares the GetProperties request. +func (client containerClient) getPropertiesPreparer(timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPropertiesResponder handles the response to the GetProperties request. +func (client containerClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerGetPropertiesResponse{rawResponse: resp.Response()}, err +} + +// ListBlobFlatSegment [Update] The List Blobs operation returns a list of the blobs under the specified container +// +// prefix is filters the results to return only containers whose name begins with the specified prefix. marker is a +// string value that identifies the portion of the list of containers to be returned with the next listing operation. +// The operation returns the NextMarker value within the response body if the listing operation did not return all +// containers remaining to be listed with the current page. The NextMarker value can be used as the value for the +// marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the +// client. maxresults is specifies the maximum number of containers to return. If the request does not specify +// maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the +// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the +// remainder of the results. For this reason, it is possible that the service will return fewer results than specified +// by maxresults, or than the default of 5000. include is include this parameter to specify one or more datasets to +// include in the response. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
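+// Listing is paged: each call returns up to maxresults items plus a NextMarker
+// to feed into the next call (see Marker.NotDone in zz_generated_models.go). A
+// typical drain loop through the ContainerURL wrapper (containerURL and ctx
+// assumed, error handling shortened):
+//
+//	for marker := (azblob.Marker{}); marker.NotDone(); {
+//		resp, err := containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{})
+//		if err != nil {
+//			return err
+//		}
+//		for _, blob := range resp.Segment.BlobItems {
+//			fmt.Println(blob.Name)
+//		}
+//		marker = resp.NextMarker
+//	}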
+func (client containerClient) ListBlobFlatSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (*ListBlobsFlatSegmentResponse, error) { + if err := validate([]validation{ + {targetValue: maxresults, + constraints: []constraint{{target: "maxresults", name: null, rule: false, + chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.listBlobFlatSegmentPreparer(prefix, marker, maxresults, include, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listBlobFlatSegmentResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ListBlobsFlatSegmentResponse), err +} + +// listBlobFlatSegmentPreparer prepares the ListBlobFlatSegment request. +func (client containerClient) listBlobFlatSegmentPreparer(prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if prefix != nil && len(*prefix) > 0 { + params.Set("prefix", *prefix) + } + if marker != nil && len(*marker) > 0 { + params.Set("marker", *marker) + } + if maxresults != nil { + params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) + } + if include != nil && len(include) > 0 { + params.Set("include", joinConst(include, ",")) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "list") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// listBlobFlatSegmentResponder handles the response to the ListBlobFlatSegment request. +func (client containerClient) listBlobFlatSegmentResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &ListBlobsFlatSegmentResponse{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// ListBlobHierarchySegment [Update] The List Blobs operation returns a list of the blobs under the specified container +// +// delimiter is when the request includes this parameter, the operation returns a BlobPrefix element in the response +// body that acts as a placeholder for all blobs whose names begin with the same substring up to the appearance of the +// delimiter character. The delimiter may be a single character or a string. prefix is filters the results to return +// only containers whose name begins with the specified prefix. 
marker is a string value that identifies the portion of +// the list of containers to be returned with the next listing operation. The operation returns the NextMarker value +// within the response body if the listing operation did not return all containers remaining to be listed with the +// current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request +// the next page of list items. The marker value is opaque to the client. maxresults is specifies the maximum number of +// containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server +// will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will +// return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the +// service will return fewer results than specified by maxresults, or than the default of 5000. include is include this +// parameter to specify one or more datasets to include in the response. timeout is the timeout parameter is expressed +// in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) ListBlobHierarchySegment(ctx context.Context, delimiter string, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (*ListBlobsHierarchySegmentResponse, error) { + if err := validate([]validation{ + {targetValue: maxresults, + constraints: []constraint{{target: "maxresults", name: null, rule: false, + chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.listBlobHierarchySegmentPreparer(delimiter, prefix, marker, maxresults, include, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listBlobHierarchySegmentResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ListBlobsHierarchySegmentResponse), err +} + +// listBlobHierarchySegmentPreparer prepares the ListBlobHierarchySegment request. 
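+// With a delimiter such as "/", one listing call returns only the direct
+// children of a prefix: blobs land in Segment.BlobItems and "virtual
+// directories" in Segment.BlobPrefixes. A sketch via the ContainerURL wrapper
+// (containerURL and ctx assumed; signature as vendored in url_container.go):
+//
+//	resp, err := containerURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "/",
+//		azblob.ListBlobsSegmentOptions{Prefix: "photos/"})
+//	if err != nil {
+//		return err
+//	}
+//	for _, dir := range resp.Segment.BlobPrefixes {
+//		fmt.Println("dir: ", dir.Name)
+//	}
+//	for _, blob := range resp.Segment.BlobItems {
+//		fmt.Println("blob:", blob.Name)
+//	}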
+func (client containerClient) listBlobHierarchySegmentPreparer(delimiter string, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if prefix != nil && len(*prefix) > 0 { + params.Set("prefix", *prefix) + } + params.Set("delimiter", delimiter) + if marker != nil && len(*marker) > 0 { + params.Set("marker", *marker) + } + if maxresults != nil { + params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) + } + if include != nil && len(include) > 0 { + params.Set("include", joinConst(include, ",")) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "list") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// listBlobHierarchySegmentResponder handles the response to the ListBlobHierarchySegment request. +func (client containerClient) listBlobHierarchySegmentResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &ListBlobsHierarchySegmentResponse{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// ReleaseLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be +// 15 to 60 seconds, or can be infinite +// +// leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds. +// For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. 
+func (client containerClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerReleaseLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.releaseLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerReleaseLeaseResponse), err +} + +// releaseLeasePreparer prepares the ReleaseLease request. +func (client containerClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "release") + return req, nil +} + +// releaseLeaseResponder handles the response to the ReleaseLease request. +func (client containerClient) releaseLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerReleaseLeaseResponse{rawResponse: resp.Response()}, err +} + +// Rename renames an existing container. +// +// sourceContainerName is required. Specifies the name of the container to rename. timeout is the timeout parameter is +// expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. sourceLeaseID is a +// lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. 
+func (client containerClient) Rename(ctx context.Context, sourceContainerName string, timeout *int32, requestID *string, sourceLeaseID *string) (*ContainerRenameResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.renamePreparer(sourceContainerName, timeout, requestID, sourceLeaseID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.renameResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerRenameResponse), err +} + +// renamePreparer prepares the Rename request. +func (client containerClient) renamePreparer(sourceContainerName string, timeout *int32, requestID *string, sourceLeaseID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "rename") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-source-container-name", sourceContainerName) + if sourceLeaseID != nil { + req.Header.Set("x-ms-source-lease-id", *sourceLeaseID) + } + return req, nil +} + +// renameResponder handles the response to the Rename request. +func (client containerClient) renameResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerRenameResponse{rawResponse: resp.Response()}, err +} + +// RenewLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// +// leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds. +// For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. 
+func (client containerClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerRenewLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.renewLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerRenewLeaseResponse), err +} + +// renewLeasePreparer prepares the RenewLease request. +func (client containerClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "renew") + return req, nil +} + +// renewLeaseResponder handles the response to the RenewLease request. +func (client containerClient) renewLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerRenewLeaseResponse{rawResponse: resp.Response()}, err +} + +// Restore restores a previously-deleted container. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +// deletedContainerName is optional. Version 2019-12-12 and later. Specifies the name of the deleted container to +// restore. deletedContainerVersion is optional. Version 2019-12-12 and later. Specifies the version of the deleted +// container to restore. 
+func (client containerClient) Restore(ctx context.Context, timeout *int32, requestID *string, deletedContainerName *string, deletedContainerVersion *string) (*ContainerRestoreResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.restorePreparer(timeout, requestID, deletedContainerName, deletedContainerVersion) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.restoreResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerRestoreResponse), err +} + +// restorePreparer prepares the Restore request. +func (client containerClient) restorePreparer(timeout *int32, requestID *string, deletedContainerName *string, deletedContainerVersion *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "undelete") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if deletedContainerName != nil { + req.Header.Set("x-ms-deleted-container-name", *deletedContainerName) + } + if deletedContainerVersion != nil { + req.Header.Set("x-ms-deleted-container-version", *deletedContainerVersion) + } + return req, nil +} + +// restoreResponder handles the response to the Restore request. +func (client containerClient) restoreResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerRestoreResponse{rawResponse: resp.Response()}, err +} + +// SetAccessPolicy sets the permissions for the specified container. The permissions indicate whether blobs in a +// container may be accessed publicly. +// +// containerACL is the acls for the container timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. access is specifies whether data in the container may be accessed publicly and +// the level of access ifModifiedSince is specify this header value to operate only on a blob if it has been modified +// since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has +// not been modified since the specified date/time. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
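+// A sketch making blobs in the container anonymously readable through the
+// ContainerURL wrapper (containerURL and ctx assumed; pass a
+// []SignedIdentifier instead of nil to install stored access policies
+// alongside the public-access level):
+//
+//	_, err := containerURL.SetAccessPolicy(ctx, azblob.PublicAccessBlob, nil,
+//		azblob.ContainerAccessConditions{})
+//	if err != nil {
+//		return err
+//	}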
+func (client containerClient) SetAccessPolicy(ctx context.Context, containerACL []SignedIdentifier, timeout *int32, leaseID *string, access PublicAccessType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerSetAccessPolicyResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setAccessPolicyPreparer(containerACL, timeout, leaseID, access, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setAccessPolicyResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerSetAccessPolicyResponse), err +} + +// setAccessPolicyPreparer prepares the SetAccessPolicy request. +func (client containerClient) setAccessPolicyPreparer(containerACL []SignedIdentifier, timeout *int32, leaseID *string, access PublicAccessType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "acl") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if access != PublicAccessNone { + req.Header.Set("x-ms-blob-public-access", string(access)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + b, err := xml.Marshal(SignedIdentifiers{Items: containerACL}) + if err != nil { + return req, pipeline.NewError(err, "failed to marshal request body") + } + req.Header.Set("Content-Type", "application/xml") + err = req.SetBody(bytes.NewReader(b)) + if err != nil { + return req, pipeline.NewError(err, "failed to set request body") + } + return req, nil +} + +// setAccessPolicyResponder handles the response to the SetAccessPolicy request. +func (client containerClient) setAccessPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerSetAccessPolicyResponse{rawResponse: resp.Response()}, err +} + +// SetMetadata operation sets one or more user-defined name-value pairs for the specified container. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. metadata is optional. Specifies a user-defined name-value pair associated with +// the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to +// the destination blob. 
If one or more name-value pairs are specified, the destination blob is created with the +// specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. ifModifiedSince is specify this header value to operate only +// on a blob if it has been modified since the specified date/time. requestID is provides a client-generated, opaque +// value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) SetMetadata(ctx context.Context, timeout *int32, leaseID *string, metadata map[string]string, ifModifiedSince *time.Time, requestID *string) (*ContainerSetMetadataResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setMetadataPreparer(timeout, leaseID, metadata, ifModifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setMetadataResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerSetMetadataResponse), err +} + +// setMetadataPreparer prepares the SetMetadata request. +func (client containerClient) setMetadataPreparer(timeout *int32, leaseID *string, metadata map[string]string, ifModifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "metadata") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// setMetadataResponder handles the response to the SetMetadata request. +func (client containerClient) setMetadataResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerSetMetadataResponse{rawResponse: resp.Response()}, err +} + +// SubmitBatch the Batch operation allows multiple API calls to be embedded into a single HTTP request. +// +// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an +// error.contentLength is the length of the request. multipartContentType is required. The value of this header must be +// multipart/mixed with a batch boundary. Example header value: multipart/mixed; boundary=batch_<GUID> timeout is the +// timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations.
requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) SubmitBatch(ctx context.Context, body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (*SubmitBatchResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.submitBatchPreparer(body, contentLength, multipartContentType, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.submitBatchResponder}, req) + if err != nil { + return nil, err + } + return resp.(*SubmitBatchResponse), err +} + +// submitBatchPreparer prepares the SubmitBatch request. +func (client containerClient) submitBatchPreparer(body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("POST", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "batch") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + req.Header.Set("Content-Type", multipartContentType) + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// submitBatchResponder handles the response to the SubmitBatch request. +func (client containerClient) submitBatchResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + return &SubmitBatchResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go new file mode 100644 index 000000000..ec872a322 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go @@ -0,0 +1,7302 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "encoding/base64" + "encoding/xml" + "errors" + "io" + "net/http" + "reflect" + "strconv" + "strings" + "time" + "unsafe" +) + +// ETag is an entity tag. +type ETag string + +const ( + // ETagNone represents an empty entity tag. + ETagNone ETag = "" + + // ETagAny matches any entity tag. + ETagAny ETag = "*" +) + +// Metadata contains metadata key/value pairs. +type Metadata map[string]string + +const mdPrefix = "x-ms-meta-" + +const mdPrefixLen = len(mdPrefix) + +// UnmarshalXML implements the xml.Unmarshaler interface for Metadata. 
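+// Element names are folded to lowercase on decode (strings.ToLower below):
+// metadata travels as case-insensitive x-ms-meta-* HTTP headers, so
+// round-tripped keys come back normalized. A caller-side illustration:
+//
+//	md := azblob.Metadata{"Author": "jane"}
+//	// after an upload and a subsequent listing, the pair comes back as
+//	// blobItem.Metadata["author"] == "jane"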
+func (md *Metadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + tokName := "" + for t, err := d.Token(); err == nil; t, err = d.Token() { + switch tt := t.(type) { + case xml.StartElement: + tokName = strings.ToLower(tt.Name.Local) + break + case xml.CharData: + if *md == nil { + *md = Metadata{} + } + (*md)[tokName] = string(tt) + break + } + } + return nil +} + +// Marker represents an opaque value used in paged responses. +type Marker struct { + Val *string +} + +// NotDone returns true if the list enumeration should be started or is not yet complete. Specifically, NotDone returns true +// for a just-initialized (zero value) Marker indicating that you should make an initial request to get a result portion from +// the service. NotDone also returns true whenever the service returns an interim result portion. NotDone returns false only +// after the service has returned the final result portion. +func (m Marker) NotDone() bool { + return m.Val == nil || *m.Val != "" +} + +// UnmarshalXML implements the xml.Unmarshaler interface for Marker. +func (m *Marker) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var out string + err := d.DecodeElement(&out, &start) + m.Val = &out + return err +} + +// concatenates a slice of const values with the specified separator between each item +func joinConst(s interface{}, sep string) string { + v := reflect.ValueOf(s) + if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { + panic("s wasn't a slice or array") + } + ss := make([]string, 0, v.Len()) + for i := 0; i < v.Len(); i++ { + ss = append(ss, v.Index(i).String()) + } + return strings.Join(ss, sep) +} + +func validateError(err error) { + if err != nil { + panic(err) + } +} + +// AccessTierType enumerates the values for access tier type. +type AccessTierType string + +const ( + // AccessTierArchive ... + AccessTierArchive AccessTierType = "Archive" + // AccessTierCool ... + AccessTierCool AccessTierType = "Cool" + // AccessTierHot ... + AccessTierHot AccessTierType = "Hot" + // AccessTierNone represents an empty AccessTierType. + AccessTierNone AccessTierType = "" + // AccessTierP10 ... + AccessTierP10 AccessTierType = "P10" + // AccessTierP15 ... + AccessTierP15 AccessTierType = "P15" + // AccessTierP20 ... + AccessTierP20 AccessTierType = "P20" + // AccessTierP30 ... + AccessTierP30 AccessTierType = "P30" + // AccessTierP4 ... + AccessTierP4 AccessTierType = "P4" + // AccessTierP40 ... + AccessTierP40 AccessTierType = "P40" + // AccessTierP50 ... + AccessTierP50 AccessTierType = "P50" + // AccessTierP6 ... + AccessTierP6 AccessTierType = "P6" + // AccessTierP60 ... + AccessTierP60 AccessTierType = "P60" + // AccessTierP70 ... + AccessTierP70 AccessTierType = "P70" + // AccessTierP80 ... + AccessTierP80 AccessTierType = "P80" +) + +// PossibleAccessTierTypeValues returns an array of possible values for the AccessTierType const type. +func PossibleAccessTierTypeValues() []AccessTierType { + return []AccessTierType{AccessTierArchive, AccessTierCool, AccessTierHot, AccessTierNone, AccessTierP10, AccessTierP15, AccessTierP20, AccessTierP30, AccessTierP4, AccessTierP40, AccessTierP50, AccessTierP6, AccessTierP60, AccessTierP70, AccessTierP80} +} + +// AccountKindType enumerates the values for account kind type. +type AccountKindType string + +const ( + // AccountKindBlobStorage ... + AccountKindBlobStorage AccountKindType = "BlobStorage" + // AccountKindBlockBlobStorage ... 
+ AccountKindBlockBlobStorage AccountKindType = "BlockBlobStorage" + // AccountKindFileStorage ... + AccountKindFileStorage AccountKindType = "FileStorage" + // AccountKindNone represents an empty AccountKindType. + AccountKindNone AccountKindType = "" + // AccountKindStorage ... + AccountKindStorage AccountKindType = "Storage" + // AccountKindStorageV2 ... + AccountKindStorageV2 AccountKindType = "StorageV2" +) + +// PossibleAccountKindTypeValues returns an array of possible values for the AccountKindType const type. +func PossibleAccountKindTypeValues() []AccountKindType { + return []AccountKindType{AccountKindBlobStorage, AccountKindBlockBlobStorage, AccountKindFileStorage, AccountKindNone, AccountKindStorage, AccountKindStorageV2} +} + +// ArchiveStatusType enumerates the values for archive status type. +type ArchiveStatusType string + +const ( + // ArchiveStatusNone represents an empty ArchiveStatusType. + ArchiveStatusNone ArchiveStatusType = "" + // ArchiveStatusRehydratePendingToCool ... + ArchiveStatusRehydratePendingToCool ArchiveStatusType = "rehydrate-pending-to-cool" + // ArchiveStatusRehydratePendingToHot ... + ArchiveStatusRehydratePendingToHot ArchiveStatusType = "rehydrate-pending-to-hot" +) + +// PossibleArchiveStatusTypeValues returns an array of possible values for the ArchiveStatusType const type. +func PossibleArchiveStatusTypeValues() []ArchiveStatusType { + return []ArchiveStatusType{ArchiveStatusNone, ArchiveStatusRehydratePendingToCool, ArchiveStatusRehydratePendingToHot} +} + +// BlobDeleteType enumerates the values for blob delete type. +type BlobDeleteType string + +const ( + // BlobDeleteNone represents an empty BlobDeleteType. + BlobDeleteNone BlobDeleteType = "" + // BlobDeletePermanent ... + BlobDeletePermanent BlobDeleteType = "Permanent" +) + +// PossibleBlobDeleteTypeValues returns an array of possible values for the BlobDeleteType const type. +func PossibleBlobDeleteTypeValues() []BlobDeleteType { + return []BlobDeleteType{BlobDeleteNone, BlobDeletePermanent} +} + +// BlobExpiryOptionsType enumerates the values for blob expiry options type. +type BlobExpiryOptionsType string + +const ( + // BlobExpiryOptionsAbsolute ... + BlobExpiryOptionsAbsolute BlobExpiryOptionsType = "Absolute" + // BlobExpiryOptionsNeverExpire ... + BlobExpiryOptionsNeverExpire BlobExpiryOptionsType = "NeverExpire" + // BlobExpiryOptionsNone represents an empty BlobExpiryOptionsType. + BlobExpiryOptionsNone BlobExpiryOptionsType = "" + // BlobExpiryOptionsRelativeToCreation ... + BlobExpiryOptionsRelativeToCreation BlobExpiryOptionsType = "RelativeToCreation" + // BlobExpiryOptionsRelativeToNow ... + BlobExpiryOptionsRelativeToNow BlobExpiryOptionsType = "RelativeToNow" +) + +// PossibleBlobExpiryOptionsTypeValues returns an array of possible values for the BlobExpiryOptionsType const type. +func PossibleBlobExpiryOptionsTypeValues() []BlobExpiryOptionsType { + return []BlobExpiryOptionsType{BlobExpiryOptionsAbsolute, BlobExpiryOptionsNeverExpire, BlobExpiryOptionsNone, BlobExpiryOptionsRelativeToCreation, BlobExpiryOptionsRelativeToNow} +} + +// BlobImmutabilityPolicyModeType enumerates the values for blob immutability policy mode type. +type BlobImmutabilityPolicyModeType string + +const ( + // BlobImmutabilityPolicyModeLocked ... + BlobImmutabilityPolicyModeLocked BlobImmutabilityPolicyModeType = "locked" + // BlobImmutabilityPolicyModeMutable ... 
+ BlobImmutabilityPolicyModeMutable BlobImmutabilityPolicyModeType = "mutable" + // BlobImmutabilityPolicyModeNone represents an empty BlobImmutabilityPolicyModeType. + BlobImmutabilityPolicyModeNone BlobImmutabilityPolicyModeType = "" + // BlobImmutabilityPolicyModeUnlocked ... + BlobImmutabilityPolicyModeUnlocked BlobImmutabilityPolicyModeType = "unlocked" +) + +// PossibleBlobImmutabilityPolicyModeTypeValues returns an array of possible values for the BlobImmutabilityPolicyModeType const type. +func PossibleBlobImmutabilityPolicyModeTypeValues() []BlobImmutabilityPolicyModeType { + return []BlobImmutabilityPolicyModeType{BlobImmutabilityPolicyModeLocked, BlobImmutabilityPolicyModeMutable, BlobImmutabilityPolicyModeNone, BlobImmutabilityPolicyModeUnlocked} +} + +// BlobType enumerates the values for blob type. +type BlobType string + +const ( + // BlobAppendBlob ... + BlobAppendBlob BlobType = "AppendBlob" + // BlobBlockBlob ... + BlobBlockBlob BlobType = "BlockBlob" + // BlobNone represents an empty BlobType. + BlobNone BlobType = "" + // BlobPageBlob ... + BlobPageBlob BlobType = "PageBlob" +) + +// PossibleBlobTypeValues returns an array of possible values for the BlobType const type. +func PossibleBlobTypeValues() []BlobType { + return []BlobType{BlobAppendBlob, BlobBlockBlob, BlobNone, BlobPageBlob} +} + +// BlockListType enumerates the values for block list type. +type BlockListType string + +const ( + // BlockListAll ... + BlockListAll BlockListType = "all" + // BlockListCommitted ... + BlockListCommitted BlockListType = "committed" + // BlockListNone represents an empty BlockListType. + BlockListNone BlockListType = "" + // BlockListUncommitted ... + BlockListUncommitted BlockListType = "uncommitted" +) + +// PossibleBlockListTypeValues returns an array of possible values for the BlockListType const type. +func PossibleBlockListTypeValues() []BlockListType { + return []BlockListType{BlockListAll, BlockListCommitted, BlockListNone, BlockListUncommitted} +} + +// CopyStatusType enumerates the values for copy status type. +type CopyStatusType string + +const ( + // CopyStatusAborted ... + CopyStatusAborted CopyStatusType = "aborted" + // CopyStatusFailed ... + CopyStatusFailed CopyStatusType = "failed" + // CopyStatusNone represents an empty CopyStatusType. + CopyStatusNone CopyStatusType = "" + // CopyStatusPending ... + CopyStatusPending CopyStatusType = "pending" + // CopyStatusSuccess ... + CopyStatusSuccess CopyStatusType = "success" +) + +// PossibleCopyStatusTypeValues returns an array of possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return []CopyStatusType{CopyStatusAborted, CopyStatusFailed, CopyStatusNone, CopyStatusPending, CopyStatusSuccess} +} + +// DeleteSnapshotsOptionType enumerates the values for delete snapshots option type. +type DeleteSnapshotsOptionType string + +const ( + // DeleteSnapshotsOptionInclude ... + DeleteSnapshotsOptionInclude DeleteSnapshotsOptionType = "include" + // DeleteSnapshotsOptionNone represents an empty DeleteSnapshotsOptionType. + DeleteSnapshotsOptionNone DeleteSnapshotsOptionType = "" + // DeleteSnapshotsOptionOnly ... + DeleteSnapshotsOptionOnly DeleteSnapshotsOptionType = "only" +) + +// PossibleDeleteSnapshotsOptionTypeValues returns an array of possible values for the DeleteSnapshotsOptionType const type. 
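+//
+// A minimal sketch (illustrative, not from the upstream file) of using this
+// list to validate arbitrary input before converting it to the typed constant:
+//
+//	func parseDeleteSnapshotsOption(s string) (DeleteSnapshotsOptionType, bool) {
+//		for _, v := range PossibleDeleteSnapshotsOptionTypeValues() {
+//			if string(v) == s {
+//				return v, true
+//			}
+//		}
+//		return DeleteSnapshotsOptionNone, false
+//	}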
+func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return []DeleteSnapshotsOptionType{DeleteSnapshotsOptionInclude, DeleteSnapshotsOptionNone, DeleteSnapshotsOptionOnly} +} + +// EncryptionAlgorithmType enumerates the values for encryption algorithm type. +type EncryptionAlgorithmType string + +const ( + // EncryptionAlgorithmAES256 ... + EncryptionAlgorithmAES256 EncryptionAlgorithmType = "AES256" + // EncryptionAlgorithmNone represents an empty EncryptionAlgorithmType. + EncryptionAlgorithmNone EncryptionAlgorithmType = "" +) + +// PossibleEncryptionAlgorithmTypeValues returns an array of possible values for the EncryptionAlgorithmType const type. +func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { + return []EncryptionAlgorithmType{EncryptionAlgorithmAES256, EncryptionAlgorithmNone} +} + +// GeoReplicationStatusType enumerates the values for geo replication status type. +type GeoReplicationStatusType string + +const ( + // GeoReplicationStatusBootstrap ... + GeoReplicationStatusBootstrap GeoReplicationStatusType = "bootstrap" + // GeoReplicationStatusLive ... + GeoReplicationStatusLive GeoReplicationStatusType = "live" + // GeoReplicationStatusNone represents an empty GeoReplicationStatusType. + GeoReplicationStatusNone GeoReplicationStatusType = "" + // GeoReplicationStatusUnavailable ... + GeoReplicationStatusUnavailable GeoReplicationStatusType = "unavailable" +) + +// PossibleGeoReplicationStatusTypeValues returns an array of possible values for the GeoReplicationStatusType const type. +func PossibleGeoReplicationStatusTypeValues() []GeoReplicationStatusType { + return []GeoReplicationStatusType{GeoReplicationStatusBootstrap, GeoReplicationStatusLive, GeoReplicationStatusNone, GeoReplicationStatusUnavailable} +} + +// LeaseDurationType enumerates the values for lease duration type. +type LeaseDurationType string + +const ( + // LeaseDurationFixed ... + LeaseDurationFixed LeaseDurationType = "fixed" + // LeaseDurationInfinite ... + LeaseDurationInfinite LeaseDurationType = "infinite" + // LeaseDurationNone represents an empty LeaseDurationType. + LeaseDurationNone LeaseDurationType = "" +) + +// PossibleLeaseDurationTypeValues returns an array of possible values for the LeaseDurationType const type. +func PossibleLeaseDurationTypeValues() []LeaseDurationType { + return []LeaseDurationType{LeaseDurationFixed, LeaseDurationInfinite, LeaseDurationNone} +} + +// LeaseStateType enumerates the values for lease state type. +type LeaseStateType string + +const ( + // LeaseStateAvailable ... + LeaseStateAvailable LeaseStateType = "available" + // LeaseStateBreaking ... + LeaseStateBreaking LeaseStateType = "breaking" + // LeaseStateBroken ... + LeaseStateBroken LeaseStateType = "broken" + // LeaseStateExpired ... + LeaseStateExpired LeaseStateType = "expired" + // LeaseStateLeased ... + LeaseStateLeased LeaseStateType = "leased" + // LeaseStateNone represents an empty LeaseStateType. + LeaseStateNone LeaseStateType = "" +) + +// PossibleLeaseStateTypeValues returns an array of possible values for the LeaseStateType const type. +func PossibleLeaseStateTypeValues() []LeaseStateType { + return []LeaseStateType{LeaseStateAvailable, LeaseStateBreaking, LeaseStateBroken, LeaseStateExpired, LeaseStateLeased, LeaseStateNone} +} + +// LeaseStatusType enumerates the values for lease status type. +type LeaseStatusType string + +const ( + // LeaseStatusLocked ... 
+ LeaseStatusLocked LeaseStatusType = "locked" + // LeaseStatusNone represents an empty LeaseStatusType. + LeaseStatusNone LeaseStatusType = "" + // LeaseStatusUnlocked ... + LeaseStatusUnlocked LeaseStatusType = "unlocked" +) + +// PossibleLeaseStatusTypeValues returns an array of possible values for the LeaseStatusType const type. +func PossibleLeaseStatusTypeValues() []LeaseStatusType { + return []LeaseStatusType{LeaseStatusLocked, LeaseStatusNone, LeaseStatusUnlocked} +} + +// ListBlobsIncludeItemType enumerates the values for list blobs include item type. +type ListBlobsIncludeItemType string + +const ( + // ListBlobsIncludeItemCopy ... + ListBlobsIncludeItemCopy ListBlobsIncludeItemType = "copy" + // ListBlobsIncludeItemDeleted ... + ListBlobsIncludeItemDeleted ListBlobsIncludeItemType = "deleted" + // ListBlobsIncludeItemDeletedwithversions ... + ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItemType = "deletedwithversions" + // ListBlobsIncludeItemImmutabilitypolicy ... + ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItemType = "immutabilitypolicy" + // ListBlobsIncludeItemLegalhold ... + ListBlobsIncludeItemLegalhold ListBlobsIncludeItemType = "legalhold" + // ListBlobsIncludeItemMetadata ... + ListBlobsIncludeItemMetadata ListBlobsIncludeItemType = "metadata" + // ListBlobsIncludeItemNone represents an empty ListBlobsIncludeItemType. + ListBlobsIncludeItemNone ListBlobsIncludeItemType = "" + // ListBlobsIncludeItemPermissions ... + ListBlobsIncludeItemPermissions ListBlobsIncludeItemType = "permissions" + // ListBlobsIncludeItemSnapshots ... + ListBlobsIncludeItemSnapshots ListBlobsIncludeItemType = "snapshots" + // ListBlobsIncludeItemTags ... + ListBlobsIncludeItemTags ListBlobsIncludeItemType = "tags" + // ListBlobsIncludeItemUncommittedblobs ... + ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItemType = "uncommittedblobs" + // ListBlobsIncludeItemVersions ... + ListBlobsIncludeItemVersions ListBlobsIncludeItemType = "versions" +) + +// PossibleListBlobsIncludeItemTypeValues returns an array of possible values for the ListBlobsIncludeItemType const type. +func PossibleListBlobsIncludeItemTypeValues() []ListBlobsIncludeItemType { + return []ListBlobsIncludeItemType{ListBlobsIncludeItemCopy, ListBlobsIncludeItemDeleted, ListBlobsIncludeItemDeletedwithversions, ListBlobsIncludeItemImmutabilitypolicy, ListBlobsIncludeItemLegalhold, ListBlobsIncludeItemMetadata, ListBlobsIncludeItemNone, ListBlobsIncludeItemPermissions, ListBlobsIncludeItemSnapshots, ListBlobsIncludeItemTags, ListBlobsIncludeItemUncommittedblobs, ListBlobsIncludeItemVersions} +} + +// ListContainersIncludeType enumerates the values for list containers include type. +type ListContainersIncludeType string + +const ( + // ListContainersIncludeDeleted ... + ListContainersIncludeDeleted ListContainersIncludeType = "deleted" + // ListContainersIncludeMetadata ... + ListContainersIncludeMetadata ListContainersIncludeType = "metadata" + // ListContainersIncludeNone represents an empty ListContainersIncludeType. + ListContainersIncludeNone ListContainersIncludeType = "" + // ListContainersIncludeSystem ... + ListContainersIncludeSystem ListContainersIncludeType = "system" +) + +// PossibleListContainersIncludeTypeValues returns an array of possible values for the ListContainersIncludeType const type. 
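+//
+// On the wire these flags become the comma-separated include= query parameter
+// of the List Containers operation (for example, include=metadata,deleted).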
+func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { + return []ListContainersIncludeType{ListContainersIncludeDeleted, ListContainersIncludeMetadata, ListContainersIncludeNone, ListContainersIncludeSystem} +} + +// PremiumPageBlobAccessTierType enumerates the values for premium page blob access tier type. +type PremiumPageBlobAccessTierType string + +const ( + // PremiumPageBlobAccessTierNone represents an empty PremiumPageBlobAccessTierType. + PremiumPageBlobAccessTierNone PremiumPageBlobAccessTierType = "" + // PremiumPageBlobAccessTierP10 ... + PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTierType = "P10" + // PremiumPageBlobAccessTierP15 ... + PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTierType = "P15" + // PremiumPageBlobAccessTierP20 ... + PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTierType = "P20" + // PremiumPageBlobAccessTierP30 ... + PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTierType = "P30" + // PremiumPageBlobAccessTierP4 ... + PremiumPageBlobAccessTierP4 PremiumPageBlobAccessTierType = "P4" + // PremiumPageBlobAccessTierP40 ... + PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTierType = "P40" + // PremiumPageBlobAccessTierP50 ... + PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTierType = "P50" + // PremiumPageBlobAccessTierP6 ... + PremiumPageBlobAccessTierP6 PremiumPageBlobAccessTierType = "P6" + // PremiumPageBlobAccessTierP60 ... + PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTierType = "P60" + // PremiumPageBlobAccessTierP70 ... + PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTierType = "P70" + // PremiumPageBlobAccessTierP80 ... + PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTierType = "P80" +) + +// PossiblePremiumPageBlobAccessTierTypeValues returns an array of possible values for the PremiumPageBlobAccessTierType const type. +func PossiblePremiumPageBlobAccessTierTypeValues() []PremiumPageBlobAccessTierType { + return []PremiumPageBlobAccessTierType{PremiumPageBlobAccessTierNone, PremiumPageBlobAccessTierP10, PremiumPageBlobAccessTierP15, PremiumPageBlobAccessTierP20, PremiumPageBlobAccessTierP30, PremiumPageBlobAccessTierP4, PremiumPageBlobAccessTierP40, PremiumPageBlobAccessTierP50, PremiumPageBlobAccessTierP6, PremiumPageBlobAccessTierP60, PremiumPageBlobAccessTierP70, PremiumPageBlobAccessTierP80} +} + +// PublicAccessType enumerates the values for public access type. +type PublicAccessType string + +const ( + // PublicAccessBlob ... + PublicAccessBlob PublicAccessType = "blob" + // PublicAccessContainer ... + PublicAccessContainer PublicAccessType = "container" + // PublicAccessNone represents an empty PublicAccessType. + PublicAccessNone PublicAccessType = "" +) + +// PossiblePublicAccessTypeValues returns an array of possible values for the PublicAccessType const type. +func PossiblePublicAccessTypeValues() []PublicAccessType { + return []PublicAccessType{PublicAccessBlob, PublicAccessContainer, PublicAccessNone} +} + +// QueryFormatType enumerates the values for query format type. +type QueryFormatType string + +const ( + // QueryFormatArrow ... + QueryFormatArrow QueryFormatType = "arrow" + // QueryFormatDelimited ... + QueryFormatDelimited QueryFormatType = "delimited" + // QueryFormatJSON ... + QueryFormatJSON QueryFormatType = "json" + // QueryFormatNone represents an empty QueryFormatType. + QueryFormatNone QueryFormatType = "" + // QueryFormatParquet ... 
+ QueryFormatParquet QueryFormatType = "parquet" +) + +// PossibleQueryFormatTypeValues returns an array of possible values for the QueryFormatType const type. +func PossibleQueryFormatTypeValues() []QueryFormatType { + return []QueryFormatType{QueryFormatArrow, QueryFormatDelimited, QueryFormatJSON, QueryFormatNone, QueryFormatParquet} +} + +// RehydratePriorityType enumerates the values for rehydrate priority type. +type RehydratePriorityType string + +const ( + // RehydratePriorityHigh ... + RehydratePriorityHigh RehydratePriorityType = "High" + // RehydratePriorityNone represents an empty RehydratePriorityType. + RehydratePriorityNone RehydratePriorityType = "" + // RehydratePriorityStandard ... + RehydratePriorityStandard RehydratePriorityType = "Standard" +) + +// PossibleRehydratePriorityTypeValues returns an array of possible values for the RehydratePriorityType const type. +func PossibleRehydratePriorityTypeValues() []RehydratePriorityType { + return []RehydratePriorityType{RehydratePriorityHigh, RehydratePriorityNone, RehydratePriorityStandard} +} + +// SequenceNumberActionType enumerates the values for sequence number action type. +type SequenceNumberActionType string + +const ( + // SequenceNumberActionIncrement ... + SequenceNumberActionIncrement SequenceNumberActionType = "increment" + // SequenceNumberActionMax ... + SequenceNumberActionMax SequenceNumberActionType = "max" + // SequenceNumberActionNone represents an empty SequenceNumberActionType. + SequenceNumberActionNone SequenceNumberActionType = "" + // SequenceNumberActionUpdate ... + SequenceNumberActionUpdate SequenceNumberActionType = "update" +) + +// PossibleSequenceNumberActionTypeValues returns an array of possible values for the SequenceNumberActionType const type. +func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { + return []SequenceNumberActionType{SequenceNumberActionIncrement, SequenceNumberActionMax, SequenceNumberActionNone, SequenceNumberActionUpdate} +} + +// SkuNameType enumerates the values for sku name type. +type SkuNameType string + +const ( + // SkuNameNone represents an empty SkuNameType. + SkuNameNone SkuNameType = "" + // SkuNamePremiumLRS ... + SkuNamePremiumLRS SkuNameType = "Premium_LRS" + // SkuNameStandardGRS ... + SkuNameStandardGRS SkuNameType = "Standard_GRS" + // SkuNameStandardLRS ... + SkuNameStandardLRS SkuNameType = "Standard_LRS" + // SkuNameStandardRAGRS ... + SkuNameStandardRAGRS SkuNameType = "Standard_RAGRS" + // SkuNameStandardZRS ... + SkuNameStandardZRS SkuNameType = "Standard_ZRS" +) + +// PossibleSkuNameTypeValues returns an array of possible values for the SkuNameType const type. +func PossibleSkuNameTypeValues() []SkuNameType { + return []SkuNameType{SkuNameNone, SkuNamePremiumLRS, SkuNameStandardGRS, SkuNameStandardLRS, SkuNameStandardRAGRS, SkuNameStandardZRS} +} + +// StorageErrorCodeType enumerates the values for storage error code type. +type StorageErrorCodeType string + +const ( + // StorageErrorCodeAccountAlreadyExists ... + StorageErrorCodeAccountAlreadyExists StorageErrorCodeType = "AccountAlreadyExists" + // StorageErrorCodeAccountBeingCreated ... + StorageErrorCodeAccountBeingCreated StorageErrorCodeType = "AccountBeingCreated" + // StorageErrorCodeAccountIsDisabled ... + StorageErrorCodeAccountIsDisabled StorageErrorCodeType = "AccountIsDisabled" + // StorageErrorCodeAppendPositionConditionNotMet ... 
+ StorageErrorCodeAppendPositionConditionNotMet StorageErrorCodeType = "AppendPositionConditionNotMet" + // StorageErrorCodeAuthenticationFailed ... + StorageErrorCodeAuthenticationFailed StorageErrorCodeType = "AuthenticationFailed" + // StorageErrorCodeAuthorizationFailure ... + StorageErrorCodeAuthorizationFailure StorageErrorCodeType = "AuthorizationFailure" + // StorageErrorCodeAuthorizationPermissionMismatch ... + StorageErrorCodeAuthorizationPermissionMismatch StorageErrorCodeType = "AuthorizationPermissionMismatch" + // StorageErrorCodeAuthorizationProtocolMismatch ... + StorageErrorCodeAuthorizationProtocolMismatch StorageErrorCodeType = "AuthorizationProtocolMismatch" + // StorageErrorCodeAuthorizationResourceTypeMismatch ... + StorageErrorCodeAuthorizationResourceTypeMismatch StorageErrorCodeType = "AuthorizationResourceTypeMismatch" + // StorageErrorCodeAuthorizationServiceMismatch ... + StorageErrorCodeAuthorizationServiceMismatch StorageErrorCodeType = "AuthorizationServiceMismatch" + // StorageErrorCodeAuthorizationSourceIPMismatch ... + StorageErrorCodeAuthorizationSourceIPMismatch StorageErrorCodeType = "AuthorizationSourceIPMismatch" + // StorageErrorCodeBlobAlreadyExists ... + StorageErrorCodeBlobAlreadyExists StorageErrorCodeType = "BlobAlreadyExists" + // StorageErrorCodeBlobArchived ... + StorageErrorCodeBlobArchived StorageErrorCodeType = "BlobArchived" + // StorageErrorCodeBlobBeingRehydrated ... + StorageErrorCodeBlobBeingRehydrated StorageErrorCodeType = "BlobBeingRehydrated" + // StorageErrorCodeBlobImmutableDueToPolicy ... + StorageErrorCodeBlobImmutableDueToPolicy StorageErrorCodeType = "BlobImmutableDueToPolicy" + // StorageErrorCodeBlobNotArchived ... + StorageErrorCodeBlobNotArchived StorageErrorCodeType = "BlobNotArchived" + // StorageErrorCodeBlobNotFound ... + StorageErrorCodeBlobNotFound StorageErrorCodeType = "BlobNotFound" + // StorageErrorCodeBlobOverwritten ... + StorageErrorCodeBlobOverwritten StorageErrorCodeType = "BlobOverwritten" + // StorageErrorCodeBlobTierInadequateForContentLength ... + StorageErrorCodeBlobTierInadequateForContentLength StorageErrorCodeType = "BlobTierInadequateForContentLength" + // StorageErrorCodeBlobUsesCustomerSpecifiedEncryption ... + StorageErrorCodeBlobUsesCustomerSpecifiedEncryption StorageErrorCodeType = "BlobUsesCustomerSpecifiedEncryption" + // StorageErrorCodeBlockCountExceedsLimit ... + StorageErrorCodeBlockCountExceedsLimit StorageErrorCodeType = "BlockCountExceedsLimit" + // StorageErrorCodeBlockListTooLong ... + StorageErrorCodeBlockListTooLong StorageErrorCodeType = "BlockListTooLong" + // StorageErrorCodeCannotChangeToLowerTier ... + StorageErrorCodeCannotChangeToLowerTier StorageErrorCodeType = "CannotChangeToLowerTier" + // StorageErrorCodeCannotVerifyCopySource ... + StorageErrorCodeCannotVerifyCopySource StorageErrorCodeType = "CannotVerifyCopySource" + // StorageErrorCodeConditionHeadersNotSupported ... + StorageErrorCodeConditionHeadersNotSupported StorageErrorCodeType = "ConditionHeadersNotSupported" + // StorageErrorCodeConditionNotMet ... + StorageErrorCodeConditionNotMet StorageErrorCodeType = "ConditionNotMet" + // StorageErrorCodeContainerAlreadyExists ... + StorageErrorCodeContainerAlreadyExists StorageErrorCodeType = "ContainerAlreadyExists" + // StorageErrorCodeContainerBeingDeleted ... + StorageErrorCodeContainerBeingDeleted StorageErrorCodeType = "ContainerBeingDeleted" + // StorageErrorCodeContainerDisabled ... 
+ StorageErrorCodeContainerDisabled StorageErrorCodeType = "ContainerDisabled" + // StorageErrorCodeContainerNotFound ... + StorageErrorCodeContainerNotFound StorageErrorCodeType = "ContainerNotFound" + // StorageErrorCodeContentLengthLargerThanTierLimit ... + StorageErrorCodeContentLengthLargerThanTierLimit StorageErrorCodeType = "ContentLengthLargerThanTierLimit" + // StorageErrorCodeCopyAcrossAccountsNotSupported ... + StorageErrorCodeCopyAcrossAccountsNotSupported StorageErrorCodeType = "CopyAcrossAccountsNotSupported" + // StorageErrorCodeCopyIDMismatch ... + StorageErrorCodeCopyIDMismatch StorageErrorCodeType = "CopyIdMismatch" + // StorageErrorCodeEmptyMetadataKey ... + StorageErrorCodeEmptyMetadataKey StorageErrorCodeType = "EmptyMetadataKey" + // StorageErrorCodeFeatureVersionMismatch ... + StorageErrorCodeFeatureVersionMismatch StorageErrorCodeType = "FeatureVersionMismatch" + // StorageErrorCodeIncrementalCopyBlobMismatch ... + StorageErrorCodeIncrementalCopyBlobMismatch StorageErrorCodeType = "IncrementalCopyBlobMismatch" + // StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ... + StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed StorageErrorCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + // StorageErrorCodeIncrementalCopySourceMustBeSnapshot ... + StorageErrorCodeIncrementalCopySourceMustBeSnapshot StorageErrorCodeType = "IncrementalCopySourceMustBeSnapshot" + // StorageErrorCodeInfiniteLeaseDurationRequired ... + StorageErrorCodeInfiniteLeaseDurationRequired StorageErrorCodeType = "InfiniteLeaseDurationRequired" + // StorageErrorCodeInsufficientAccountPermissions ... + StorageErrorCodeInsufficientAccountPermissions StorageErrorCodeType = "InsufficientAccountPermissions" + // StorageErrorCodeInternalError ... + StorageErrorCodeInternalError StorageErrorCodeType = "InternalError" + // StorageErrorCodeInvalidAuthenticationInfo ... + StorageErrorCodeInvalidAuthenticationInfo StorageErrorCodeType = "InvalidAuthenticationInfo" + // StorageErrorCodeInvalidBlobOrBlock ... + StorageErrorCodeInvalidBlobOrBlock StorageErrorCodeType = "InvalidBlobOrBlock" + // StorageErrorCodeInvalidBlobTier ... + StorageErrorCodeInvalidBlobTier StorageErrorCodeType = "InvalidBlobTier" + // StorageErrorCodeInvalidBlobType ... + StorageErrorCodeInvalidBlobType StorageErrorCodeType = "InvalidBlobType" + // StorageErrorCodeInvalidBlockID ... + StorageErrorCodeInvalidBlockID StorageErrorCodeType = "InvalidBlockId" + // StorageErrorCodeInvalidBlockList ... + StorageErrorCodeInvalidBlockList StorageErrorCodeType = "InvalidBlockList" + // StorageErrorCodeInvalidHeaderValue ... + StorageErrorCodeInvalidHeaderValue StorageErrorCodeType = "InvalidHeaderValue" + // StorageErrorCodeInvalidHTTPVerb ... + StorageErrorCodeInvalidHTTPVerb StorageErrorCodeType = "InvalidHttpVerb" + // StorageErrorCodeInvalidInput ... + StorageErrorCodeInvalidInput StorageErrorCodeType = "InvalidInput" + // StorageErrorCodeInvalidMd5 ... + StorageErrorCodeInvalidMd5 StorageErrorCodeType = "InvalidMd5" + // StorageErrorCodeInvalidMetadata ... + StorageErrorCodeInvalidMetadata StorageErrorCodeType = "InvalidMetadata" + // StorageErrorCodeInvalidOperation ... + StorageErrorCodeInvalidOperation StorageErrorCodeType = "InvalidOperation" + // StorageErrorCodeInvalidPageRange ... + StorageErrorCodeInvalidPageRange StorageErrorCodeType = "InvalidPageRange" + // StorageErrorCodeInvalidQueryParameterValue ... 
+ StorageErrorCodeInvalidQueryParameterValue StorageErrorCodeType = "InvalidQueryParameterValue" + // StorageErrorCodeInvalidRange ... + StorageErrorCodeInvalidRange StorageErrorCodeType = "InvalidRange" + // StorageErrorCodeInvalidResourceName ... + StorageErrorCodeInvalidResourceName StorageErrorCodeType = "InvalidResourceName" + // StorageErrorCodeInvalidSourceBlobType ... + StorageErrorCodeInvalidSourceBlobType StorageErrorCodeType = "InvalidSourceBlobType" + // StorageErrorCodeInvalidSourceBlobURL ... + StorageErrorCodeInvalidSourceBlobURL StorageErrorCodeType = "InvalidSourceBlobUrl" + // StorageErrorCodeInvalidURI ... + StorageErrorCodeInvalidURI StorageErrorCodeType = "InvalidUri" + // StorageErrorCodeInvalidVersionForPageBlobOperation ... + StorageErrorCodeInvalidVersionForPageBlobOperation StorageErrorCodeType = "InvalidVersionForPageBlobOperation" + // StorageErrorCodeInvalidXMLDocument ... + StorageErrorCodeInvalidXMLDocument StorageErrorCodeType = "InvalidXmlDocument" + // StorageErrorCodeInvalidXMLNodeValue ... + StorageErrorCodeInvalidXMLNodeValue StorageErrorCodeType = "InvalidXmlNodeValue" + // StorageErrorCodeLeaseAlreadyBroken ... + StorageErrorCodeLeaseAlreadyBroken StorageErrorCodeType = "LeaseAlreadyBroken" + // StorageErrorCodeLeaseAlreadyPresent ... + StorageErrorCodeLeaseAlreadyPresent StorageErrorCodeType = "LeaseAlreadyPresent" + // StorageErrorCodeLeaseIDMismatchWithBlobOperation ... + StorageErrorCodeLeaseIDMismatchWithBlobOperation StorageErrorCodeType = "LeaseIdMismatchWithBlobOperation" + // StorageErrorCodeLeaseIDMismatchWithContainerOperation ... + StorageErrorCodeLeaseIDMismatchWithContainerOperation StorageErrorCodeType = "LeaseIdMismatchWithContainerOperation" + // StorageErrorCodeLeaseIDMismatchWithLeaseOperation ... + StorageErrorCodeLeaseIDMismatchWithLeaseOperation StorageErrorCodeType = "LeaseIdMismatchWithLeaseOperation" + // StorageErrorCodeLeaseIDMissing ... + StorageErrorCodeLeaseIDMissing StorageErrorCodeType = "LeaseIdMissing" + // StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired ... + StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired StorageErrorCodeType = "LeaseIsBreakingAndCannotBeAcquired" + // StorageErrorCodeLeaseIsBreakingAndCannotBeChanged ... + StorageErrorCodeLeaseIsBreakingAndCannotBeChanged StorageErrorCodeType = "LeaseIsBreakingAndCannotBeChanged" + // StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed ... + StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed StorageErrorCodeType = "LeaseIsBrokenAndCannotBeRenewed" + // StorageErrorCodeLeaseLost ... + StorageErrorCodeLeaseLost StorageErrorCodeType = "LeaseLost" + // StorageErrorCodeLeaseNotPresentWithBlobOperation ... + StorageErrorCodeLeaseNotPresentWithBlobOperation StorageErrorCodeType = "LeaseNotPresentWithBlobOperation" + // StorageErrorCodeLeaseNotPresentWithContainerOperation ... + StorageErrorCodeLeaseNotPresentWithContainerOperation StorageErrorCodeType = "LeaseNotPresentWithContainerOperation" + // StorageErrorCodeLeaseNotPresentWithLeaseOperation ... + StorageErrorCodeLeaseNotPresentWithLeaseOperation StorageErrorCodeType = "LeaseNotPresentWithLeaseOperation" + // StorageErrorCodeMaxBlobSizeConditionNotMet ... + StorageErrorCodeMaxBlobSizeConditionNotMet StorageErrorCodeType = "MaxBlobSizeConditionNotMet" + // StorageErrorCodeMd5Mismatch ... + StorageErrorCodeMd5Mismatch StorageErrorCodeType = "Md5Mismatch" + // StorageErrorCodeMetadataTooLarge ... 
+ StorageErrorCodeMetadataTooLarge StorageErrorCodeType = "MetadataTooLarge" + // StorageErrorCodeMissingContentLengthHeader ... + StorageErrorCodeMissingContentLengthHeader StorageErrorCodeType = "MissingContentLengthHeader" + // StorageErrorCodeMissingRequiredHeader ... + StorageErrorCodeMissingRequiredHeader StorageErrorCodeType = "MissingRequiredHeader" + // StorageErrorCodeMissingRequiredQueryParameter ... + StorageErrorCodeMissingRequiredQueryParameter StorageErrorCodeType = "MissingRequiredQueryParameter" + // StorageErrorCodeMissingRequiredXMLNode ... + StorageErrorCodeMissingRequiredXMLNode StorageErrorCodeType = "MissingRequiredXmlNode" + // StorageErrorCodeMultipleConditionHeadersNotSupported ... + StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCodeType = "MultipleConditionHeadersNotSupported" + // StorageErrorCodeNoAuthenticationInformation ... + StorageErrorCodeNoAuthenticationInformation StorageErrorCodeType = "NoAuthenticationInformation" + // StorageErrorCodeNone represents an empty StorageErrorCodeType. + StorageErrorCodeNone StorageErrorCodeType = "" + // StorageErrorCodeNoPendingCopyOperation ... + StorageErrorCodeNoPendingCopyOperation StorageErrorCodeType = "NoPendingCopyOperation" + // StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob ... + StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob StorageErrorCodeType = "OperationNotAllowedOnIncrementalCopyBlob" + // StorageErrorCodeOperationTimedOut ... + StorageErrorCodeOperationTimedOut StorageErrorCodeType = "OperationTimedOut" + // StorageErrorCodeOutOfRangeInput ... + StorageErrorCodeOutOfRangeInput StorageErrorCodeType = "OutOfRangeInput" + // StorageErrorCodeOutOfRangeQueryParameterValue ... + StorageErrorCodeOutOfRangeQueryParameterValue StorageErrorCodeType = "OutOfRangeQueryParameterValue" + // StorageErrorCodePendingCopyOperation ... + StorageErrorCodePendingCopyOperation StorageErrorCodeType = "PendingCopyOperation" + // StorageErrorCodePreviousSnapshotCannotBeNewer ... + StorageErrorCodePreviousSnapshotCannotBeNewer StorageErrorCodeType = "PreviousSnapshotCannotBeNewer" + // StorageErrorCodePreviousSnapshotNotFound ... + StorageErrorCodePreviousSnapshotNotFound StorageErrorCodeType = "PreviousSnapshotNotFound" + // StorageErrorCodePreviousSnapshotOperationNotSupported ... + StorageErrorCodePreviousSnapshotOperationNotSupported StorageErrorCodeType = "PreviousSnapshotOperationNotSupported" + // StorageErrorCodeRequestBodyTooLarge ... + StorageErrorCodeRequestBodyTooLarge StorageErrorCodeType = "RequestBodyTooLarge" + // StorageErrorCodeRequestURLFailedToParse ... + StorageErrorCodeRequestURLFailedToParse StorageErrorCodeType = "RequestUrlFailedToParse" + // StorageErrorCodeResourceAlreadyExists ... + StorageErrorCodeResourceAlreadyExists StorageErrorCodeType = "ResourceAlreadyExists" + // StorageErrorCodeResourceNotFound ... + StorageErrorCodeResourceNotFound StorageErrorCodeType = "ResourceNotFound" + // StorageErrorCodeResourceTypeMismatch ... + StorageErrorCodeResourceTypeMismatch StorageErrorCodeType = "ResourceTypeMismatch" + // StorageErrorCodeSequenceNumberConditionNotMet ... + StorageErrorCodeSequenceNumberConditionNotMet StorageErrorCodeType = "SequenceNumberConditionNotMet" + // StorageErrorCodeSequenceNumberIncrementTooLarge ... + StorageErrorCodeSequenceNumberIncrementTooLarge StorageErrorCodeType = "SequenceNumberIncrementTooLarge" + // StorageErrorCodeServerBusy ... 
+ StorageErrorCodeServerBusy StorageErrorCodeType = "ServerBusy" + // StorageErrorCodeSnapshotCountExceeded ... + StorageErrorCodeSnapshotCountExceeded StorageErrorCodeType = "SnapshotCountExceeded" + // StorageErrorCodeSnapshotOperationRateExceeded ... + StorageErrorCodeSnapshotOperationRateExceeded StorageErrorCodeType = "SnapshotOperationRateExceeded" + // StorageErrorCodeSnapshotsPresent ... + StorageErrorCodeSnapshotsPresent StorageErrorCodeType = "SnapshotsPresent" + // StorageErrorCodeSourceConditionNotMet ... + StorageErrorCodeSourceConditionNotMet StorageErrorCodeType = "SourceConditionNotMet" + // StorageErrorCodeSystemInUse ... + StorageErrorCodeSystemInUse StorageErrorCodeType = "SystemInUse" + // StorageErrorCodeTargetConditionNotMet ... + StorageErrorCodeTargetConditionNotMet StorageErrorCodeType = "TargetConditionNotMet" + // StorageErrorCodeUnauthorizedBlobOverwrite ... + StorageErrorCodeUnauthorizedBlobOverwrite StorageErrorCodeType = "UnauthorizedBlobOverwrite" + // StorageErrorCodeUnsupportedHeader ... + StorageErrorCodeUnsupportedHeader StorageErrorCodeType = "UnsupportedHeader" + // StorageErrorCodeUnsupportedHTTPVerb ... + StorageErrorCodeUnsupportedHTTPVerb StorageErrorCodeType = "UnsupportedHttpVerb" + // StorageErrorCodeUnsupportedQueryParameter ... + StorageErrorCodeUnsupportedQueryParameter StorageErrorCodeType = "UnsupportedQueryParameter" + // StorageErrorCodeUnsupportedXMLNode ... + StorageErrorCodeUnsupportedXMLNode StorageErrorCodeType = "UnsupportedXmlNode" +) + +// PossibleStorageErrorCodeTypeValues returns an array of possible values for the StorageErrorCodeType const type. +func PossibleStorageErrorCodeTypeValues() []StorageErrorCodeType { + return []StorageErrorCodeType{StorageErrorCodeAccountAlreadyExists, StorageErrorCodeAccountBeingCreated, StorageErrorCodeAccountIsDisabled, StorageErrorCodeAppendPositionConditionNotMet, StorageErrorCodeAuthenticationFailed, StorageErrorCodeAuthorizationFailure, StorageErrorCodeAuthorizationPermissionMismatch, StorageErrorCodeAuthorizationProtocolMismatch, StorageErrorCodeAuthorizationResourceTypeMismatch, StorageErrorCodeAuthorizationServiceMismatch, StorageErrorCodeAuthorizationSourceIPMismatch, StorageErrorCodeBlobAlreadyExists, StorageErrorCodeBlobArchived, StorageErrorCodeBlobBeingRehydrated, StorageErrorCodeBlobImmutableDueToPolicy, StorageErrorCodeBlobNotArchived, StorageErrorCodeBlobNotFound, StorageErrorCodeBlobOverwritten, StorageErrorCodeBlobTierInadequateForContentLength, StorageErrorCodeBlobUsesCustomerSpecifiedEncryption, StorageErrorCodeBlockCountExceedsLimit, StorageErrorCodeBlockListTooLong, StorageErrorCodeCannotChangeToLowerTier, StorageErrorCodeCannotVerifyCopySource, StorageErrorCodeConditionHeadersNotSupported, StorageErrorCodeConditionNotMet, StorageErrorCodeContainerAlreadyExists, StorageErrorCodeContainerBeingDeleted, StorageErrorCodeContainerDisabled, StorageErrorCodeContainerNotFound, StorageErrorCodeContentLengthLargerThanTierLimit, StorageErrorCodeCopyAcrossAccountsNotSupported, StorageErrorCodeCopyIDMismatch, StorageErrorCodeEmptyMetadataKey, StorageErrorCodeFeatureVersionMismatch, StorageErrorCodeIncrementalCopyBlobMismatch, StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed, StorageErrorCodeIncrementalCopySourceMustBeSnapshot, StorageErrorCodeInfiniteLeaseDurationRequired, StorageErrorCodeInsufficientAccountPermissions, StorageErrorCodeInternalError, StorageErrorCodeInvalidAuthenticationInfo, StorageErrorCodeInvalidBlobOrBlock, StorageErrorCodeInvalidBlobTier, 
StorageErrorCodeInvalidBlobType, StorageErrorCodeInvalidBlockID, StorageErrorCodeInvalidBlockList, StorageErrorCodeInvalidHeaderValue, StorageErrorCodeInvalidHTTPVerb, StorageErrorCodeInvalidInput, StorageErrorCodeInvalidMd5, StorageErrorCodeInvalidMetadata, StorageErrorCodeInvalidOperation, StorageErrorCodeInvalidPageRange, StorageErrorCodeInvalidQueryParameterValue, StorageErrorCodeInvalidRange, StorageErrorCodeInvalidResourceName, StorageErrorCodeInvalidSourceBlobType, StorageErrorCodeInvalidSourceBlobURL, StorageErrorCodeInvalidURI, StorageErrorCodeInvalidVersionForPageBlobOperation, StorageErrorCodeInvalidXMLDocument, StorageErrorCodeInvalidXMLNodeValue, StorageErrorCodeLeaseAlreadyBroken, StorageErrorCodeLeaseAlreadyPresent, StorageErrorCodeLeaseIDMismatchWithBlobOperation, StorageErrorCodeLeaseIDMismatchWithContainerOperation, StorageErrorCodeLeaseIDMismatchWithLeaseOperation, StorageErrorCodeLeaseIDMissing, StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired, StorageErrorCodeLeaseIsBreakingAndCannotBeChanged, StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed, StorageErrorCodeLeaseLost, StorageErrorCodeLeaseNotPresentWithBlobOperation, StorageErrorCodeLeaseNotPresentWithContainerOperation, StorageErrorCodeLeaseNotPresentWithLeaseOperation, StorageErrorCodeMaxBlobSizeConditionNotMet, StorageErrorCodeMd5Mismatch, StorageErrorCodeMetadataTooLarge, StorageErrorCodeMissingContentLengthHeader, StorageErrorCodeMissingRequiredHeader, StorageErrorCodeMissingRequiredQueryParameter, StorageErrorCodeMissingRequiredXMLNode, StorageErrorCodeMultipleConditionHeadersNotSupported, StorageErrorCodeNoAuthenticationInformation, StorageErrorCodeNone, StorageErrorCodeNoPendingCopyOperation, StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob, StorageErrorCodeOperationTimedOut, StorageErrorCodeOutOfRangeInput, StorageErrorCodeOutOfRangeQueryParameterValue, StorageErrorCodePendingCopyOperation, StorageErrorCodePreviousSnapshotCannotBeNewer, StorageErrorCodePreviousSnapshotNotFound, StorageErrorCodePreviousSnapshotOperationNotSupported, StorageErrorCodeRequestBodyTooLarge, StorageErrorCodeRequestURLFailedToParse, StorageErrorCodeResourceAlreadyExists, StorageErrorCodeResourceNotFound, StorageErrorCodeResourceTypeMismatch, StorageErrorCodeSequenceNumberConditionNotMet, StorageErrorCodeSequenceNumberIncrementTooLarge, StorageErrorCodeServerBusy, StorageErrorCodeSnapshotCountExceeded, StorageErrorCodeSnapshotOperationRateExceeded, StorageErrorCodeSnapshotsPresent, StorageErrorCodeSourceConditionNotMet, StorageErrorCodeSystemInUse, StorageErrorCodeTargetConditionNotMet, StorageErrorCodeUnauthorizedBlobOverwrite, StorageErrorCodeUnsupportedHeader, StorageErrorCodeUnsupportedHTTPVerb, StorageErrorCodeUnsupportedQueryParameter, StorageErrorCodeUnsupportedXMLNode} +} + +// SyncCopyStatusType enumerates the values for sync copy status type. +type SyncCopyStatusType string + +const ( + // SyncCopyStatusNone represents an empty SyncCopyStatusType. + SyncCopyStatusNone SyncCopyStatusType = "" + // SyncCopyStatusSuccess ... + SyncCopyStatusSuccess SyncCopyStatusType = "success" +) + +// PossibleSyncCopyStatusTypeValues returns an array of possible values for the SyncCopyStatusType const type. 
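+//
+// Unlike the asynchronous CopyStatusType above, a synchronous Copy From URL
+// completes within the request itself, so "success" is the only non-empty
+// value the service reports here.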
+func PossibleSyncCopyStatusTypeValues() []SyncCopyStatusType { + return []SyncCopyStatusType{SyncCopyStatusNone, SyncCopyStatusSuccess} +} + +// AccessPolicy - An Access policy +type AccessPolicy struct { + // Start - the date-time the policy is active + Start *time.Time `xml:"Start"` + // Expiry - the date-time the policy expires + Expiry *time.Time `xml:"Expiry"` + // Permission - the permissions for the acl policy + Permission *string `xml:"Permission"` +} + +// MarshalXML implements the xml.Marshaler interface for AccessPolicy. +func (ap AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + ap2 := (*accessPolicy)(unsafe.Pointer(&ap)) + return e.EncodeElement(*ap2, start) +} + +// UnmarshalXML implements the xml.Unmarshaler interface for AccessPolicy. +func (ap *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + ap2 := (*accessPolicy)(unsafe.Pointer(ap)) + return d.DecodeElement(ap2, &start) +} + +// AppendBlobAppendBlockFromURLResponse ... +type AppendBlobAppendBlockFromURLResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (ababfur AppendBlobAppendBlockFromURLResponse) Response() *http.Response { + return ababfur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (ababfur AppendBlobAppendBlockFromURLResponse) StatusCode() int { + return ababfur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (ababfur AppendBlobAppendBlockFromURLResponse) Status() string { + return ababfur.rawResponse.Status +} + +// BlobAppendOffset returns the value for header x-ms-blob-append-offset. +func (ababfur AppendBlobAppendBlockFromURLResponse) BlobAppendOffset() string { + return ababfur.rawResponse.Header.Get("x-ms-blob-append-offset") +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. +func (ababfur AppendBlobAppendBlockFromURLResponse) BlobCommittedBlockCount() int32 { + s := ababfur.rawResponse.Header.Get("x-ms-blob-committed-block-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// ContentMD5 returns the value for header Content-MD5. +func (ababfur AppendBlobAppendBlockFromURLResponse) ContentMD5() []byte { + s := ababfur.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (ababfur AppendBlobAppendBlockFromURLResponse) Date() time.Time { + s := ababfur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (ababfur AppendBlobAppendBlockFromURLResponse) EncryptionKeySha256() string { + return ababfur.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (ababfur AppendBlobAppendBlockFromURLResponse) EncryptionScope() string { + return ababfur.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (ababfur AppendBlobAppendBlockFromURLResponse) ErrorCode() string { + return ababfur.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. 
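+//
+// The returned value supports optimistic concurrency: echo it back through an
+// If-Match access condition and a later write fails with ConditionNotMet if
+// the blob changed in between.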
+func (ababfur AppendBlobAppendBlockFromURLResponse) ETag() ETag { + return ETag(ababfur.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (ababfur AppendBlobAppendBlockFromURLResponse) IsServerEncrypted() string { + return ababfur.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (ababfur AppendBlobAppendBlockFromURLResponse) LastModified() time.Time { + s := ababfur.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (ababfur AppendBlobAppendBlockFromURLResponse) RequestID() string { + return ababfur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (ababfur AppendBlobAppendBlockFromURLResponse) Version() string { + return ababfur.rawResponse.Header.Get("x-ms-version") +} + +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (ababfur AppendBlobAppendBlockFromURLResponse) XMsContentCrc64() []byte { + s := ababfur.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// AppendBlobAppendBlockResponse ... +type AppendBlobAppendBlockResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (ababr AppendBlobAppendBlockResponse) Response() *http.Response { + return ababr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (ababr AppendBlobAppendBlockResponse) StatusCode() int { + return ababr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (ababr AppendBlobAppendBlockResponse) Status() string { + return ababr.rawResponse.Status +} + +// BlobAppendOffset returns the value for header x-ms-blob-append-offset. +func (ababr AppendBlobAppendBlockResponse) BlobAppendOffset() string { + return ababr.rawResponse.Header.Get("x-ms-blob-append-offset") +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. +func (ababr AppendBlobAppendBlockResponse) BlobCommittedBlockCount() int32 { + s := ababr.rawResponse.Header.Get("x-ms-blob-committed-block-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (ababr AppendBlobAppendBlockResponse) ClientRequestID() string { + return ababr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. +func (ababr AppendBlobAppendBlockResponse) ContentMD5() []byte { + s := ababr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (ababr AppendBlobAppendBlockResponse) Date() time.Time { + s := ababr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. 
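+//
+// When the block was written with a customer-provided encryption key, this
+// echoes the SHA-256 of that key so callers can confirm which key was used.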
+func (ababr AppendBlobAppendBlockResponse) EncryptionKeySha256() string { + return ababr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (ababr AppendBlobAppendBlockResponse) EncryptionScope() string { + return ababr.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (ababr AppendBlobAppendBlockResponse) ErrorCode() string { + return ababr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (ababr AppendBlobAppendBlockResponse) ETag() ETag { + return ETag(ababr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (ababr AppendBlobAppendBlockResponse) IsServerEncrypted() string { + return ababr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (ababr AppendBlobAppendBlockResponse) LastModified() time.Time { + s := ababr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (ababr AppendBlobAppendBlockResponse) RequestID() string { + return ababr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (ababr AppendBlobAppendBlockResponse) Version() string { + return ababr.rawResponse.Header.Get("x-ms-version") +} + +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (ababr AppendBlobAppendBlockResponse) XMsContentCrc64() []byte { + s := ababr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// AppendBlobCreateResponse ... +type AppendBlobCreateResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (abcr AppendBlobCreateResponse) Response() *http.Response { + return abcr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (abcr AppendBlobCreateResponse) StatusCode() int { + return abcr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (abcr AppendBlobCreateResponse) Status() string { + return abcr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (abcr AppendBlobCreateResponse) ClientRequestID() string { + return abcr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. +func (abcr AppendBlobCreateResponse) ContentMD5() []byte { + s := abcr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (abcr AppendBlobCreateResponse) Date() time.Time { + s := abcr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. 
+func (abcr AppendBlobCreateResponse) EncryptionKeySha256() string { + return abcr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (abcr AppendBlobCreateResponse) EncryptionScope() string { + return abcr.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (abcr AppendBlobCreateResponse) ErrorCode() string { + return abcr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (abcr AppendBlobCreateResponse) ETag() ETag { + return ETag(abcr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (abcr AppendBlobCreateResponse) IsServerEncrypted() string { + return abcr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (abcr AppendBlobCreateResponse) LastModified() time.Time { + s := abcr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (abcr AppendBlobCreateResponse) RequestID() string { + return abcr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (abcr AppendBlobCreateResponse) Version() string { + return abcr.rawResponse.Header.Get("x-ms-version") +} + +// VersionID returns the value for header x-ms-version-id. +func (abcr AppendBlobCreateResponse) VersionID() string { + return abcr.rawResponse.Header.Get("x-ms-version-id") +} + +// AppendBlobSealResponse ... +type AppendBlobSealResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (absr AppendBlobSealResponse) Response() *http.Response { + return absr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (absr AppendBlobSealResponse) StatusCode() int { + return absr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (absr AppendBlobSealResponse) Status() string { + return absr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (absr AppendBlobSealResponse) ClientRequestID() string { + return absr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (absr AppendBlobSealResponse) Date() time.Time { + s := absr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (absr AppendBlobSealResponse) ErrorCode() string { + return absr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (absr AppendBlobSealResponse) ETag() ETag { + return ETag(absr.rawResponse.Header.Get("ETag")) +} + +// IsSealed returns the value for header x-ms-blob-sealed. +func (absr AppendBlobSealResponse) IsSealed() string { + return absr.rawResponse.Header.Get("x-ms-blob-sealed") +} + +// LastModified returns the value for header Last-Modified. 
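+//
+// As with the other date accessors in this file, a missing or unparsable
+// header yields the zero time.Time, which callers can detect with IsZero().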
+func (absr AppendBlobSealResponse) LastModified() time.Time { + s := absr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (absr AppendBlobSealResponse) RequestID() string { + return absr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (absr AppendBlobSealResponse) Version() string { + return absr.rawResponse.Header.Get("x-ms-version") +} + +// ArrowConfiguration - Groups the settings used for formatting the response if the response should be Arrow +// formatted. +type ArrowConfiguration struct { + Schema []ArrowField `xml:"Schema>Field"` +} + +// ArrowField - Groups settings regarding specific field of an arrow schema +type ArrowField struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Field"` + Type string `xml:"Type"` + Name *string `xml:"Name"` + Precision *int32 `xml:"Precision"` + Scale *int32 `xml:"Scale"` +} + +// BlobAbortCopyFromURLResponse ... +type BlobAbortCopyFromURLResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bacfur BlobAbortCopyFromURLResponse) Response() *http.Response { + return bacfur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bacfur BlobAbortCopyFromURLResponse) StatusCode() int { + return bacfur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bacfur BlobAbortCopyFromURLResponse) Status() string { + return bacfur.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bacfur BlobAbortCopyFromURLResponse) ClientRequestID() string { + return bacfur.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bacfur BlobAbortCopyFromURLResponse) Date() time.Time { + s := bacfur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bacfur BlobAbortCopyFromURLResponse) ErrorCode() string { + return bacfur.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bacfur BlobAbortCopyFromURLResponse) RequestID() string { + return bacfur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bacfur BlobAbortCopyFromURLResponse) Version() string { + return bacfur.rawResponse.Header.Get("x-ms-version") +} + +// BlobAcquireLeaseResponse ... +type BlobAcquireLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (balr BlobAcquireLeaseResponse) Response() *http.Response { + return balr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (balr BlobAcquireLeaseResponse) StatusCode() int { + return balr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (balr BlobAcquireLeaseResponse) Status() string { + return balr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. 
+func (balr BlobAcquireLeaseResponse) ClientRequestID() string { + return balr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (balr BlobAcquireLeaseResponse) Date() time.Time { + s := balr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (balr BlobAcquireLeaseResponse) ErrorCode() string { + return balr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (balr BlobAcquireLeaseResponse) ETag() ETag { + return ETag(balr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (balr BlobAcquireLeaseResponse) LastModified() time.Time { + s := balr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (balr BlobAcquireLeaseResponse) LeaseID() string { + return balr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (balr BlobAcquireLeaseResponse) RequestID() string { + return balr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (balr BlobAcquireLeaseResponse) Version() string { + return balr.rawResponse.Header.Get("x-ms-version") +} + +// BlobBreakLeaseResponse ... +type BlobBreakLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bblr BlobBreakLeaseResponse) Response() *http.Response { + return bblr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bblr BlobBreakLeaseResponse) StatusCode() int { + return bblr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bblr BlobBreakLeaseResponse) Status() string { + return bblr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bblr BlobBreakLeaseResponse) ClientRequestID() string { + return bblr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bblr BlobBreakLeaseResponse) Date() time.Time { + s := bblr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bblr BlobBreakLeaseResponse) ErrorCode() string { + return bblr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bblr BlobBreakLeaseResponse) ETag() ETag { + return ETag(bblr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bblr BlobBreakLeaseResponse) LastModified() time.Time { + s := bblr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseTime returns the value for header x-ms-lease-time. 
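+// The accessor returns -1 when the header is absent and 0 when the value fails
+// to parse, so the two cases remain distinguishable. Callers that need the raw
+// string can fall back to the underlying response headers (illustrative; resp
+// is a placeholder name):
+//
+//	if s := resp.Response().Header.Get("x-ms-lease-time"); s != "" {
+//		// inspect or log the raw value here
+//	}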
+func (bblr BlobBreakLeaseResponse) LeaseTime() int32 { + s := bblr.rawResponse.Header.Get("x-ms-lease-time") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// RequestID returns the value for header x-ms-request-id. +func (bblr BlobBreakLeaseResponse) RequestID() string { + return bblr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bblr BlobBreakLeaseResponse) Version() string { + return bblr.rawResponse.Header.Get("x-ms-version") +} + +// BlobChangeLeaseResponse ... +type BlobChangeLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bclr BlobChangeLeaseResponse) Response() *http.Response { + return bclr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bclr BlobChangeLeaseResponse) StatusCode() int { + return bclr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bclr BlobChangeLeaseResponse) Status() string { + return bclr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bclr BlobChangeLeaseResponse) ClientRequestID() string { + return bclr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bclr BlobChangeLeaseResponse) Date() time.Time { + s := bclr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bclr BlobChangeLeaseResponse) ErrorCode() string { + return bclr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bclr BlobChangeLeaseResponse) ETag() ETag { + return ETag(bclr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bclr BlobChangeLeaseResponse) LastModified() time.Time { + s := bclr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (bclr BlobChangeLeaseResponse) LeaseID() string { + return bclr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (bclr BlobChangeLeaseResponse) RequestID() string { + return bclr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bclr BlobChangeLeaseResponse) Version() string { + return bclr.rawResponse.Header.Get("x-ms-version") +} + +// BlobCopyFromURLResponse ... +type BlobCopyFromURLResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bcfur BlobCopyFromURLResponse) Response() *http.Response { + return bcfur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bcfur BlobCopyFromURLResponse) StatusCode() int { + return bcfur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bcfur BlobCopyFromURLResponse) Status() string { + return bcfur.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. 
+func (bcfur BlobCopyFromURLResponse) ClientRequestID() string { + return bcfur.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. +func (bcfur BlobCopyFromURLResponse) ContentMD5() []byte { + s := bcfur.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// CopyID returns the value for header x-ms-copy-id. +func (bcfur BlobCopyFromURLResponse) CopyID() string { + return bcfur.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (bcfur BlobCopyFromURLResponse) CopyStatus() SyncCopyStatusType { + return SyncCopyStatusType(bcfur.rawResponse.Header.Get("x-ms-copy-status")) +} + +// Date returns the value for header Date. +func (bcfur BlobCopyFromURLResponse) Date() time.Time { + s := bcfur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bcfur BlobCopyFromURLResponse) ErrorCode() string { + return bcfur.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bcfur BlobCopyFromURLResponse) ETag() ETag { + return ETag(bcfur.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bcfur BlobCopyFromURLResponse) LastModified() time.Time { + s := bcfur.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bcfur BlobCopyFromURLResponse) RequestID() string { + return bcfur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bcfur BlobCopyFromURLResponse) Version() string { + return bcfur.rawResponse.Header.Get("x-ms-version") +} + +// VersionID returns the value for header x-ms-version-id. +func (bcfur BlobCopyFromURLResponse) VersionID() string { + return bcfur.rawResponse.Header.Get("x-ms-version-id") +} + +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (bcfur BlobCopyFromURLResponse) XMsContentCrc64() []byte { + s := bcfur.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// BlobCreateSnapshotResponse ... +type BlobCreateSnapshotResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bcsr BlobCreateSnapshotResponse) Response() *http.Response { + return bcsr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bcsr BlobCreateSnapshotResponse) StatusCode() int { + return bcsr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bcsr BlobCreateSnapshotResponse) Status() string { + return bcsr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bcsr BlobCreateSnapshotResponse) ClientRequestID() string { + return bcsr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. 
+func (bcsr BlobCreateSnapshotResponse) Date() time.Time { + s := bcsr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bcsr BlobCreateSnapshotResponse) ErrorCode() string { + return bcsr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bcsr BlobCreateSnapshotResponse) ETag() ETag { + return ETag(bcsr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (bcsr BlobCreateSnapshotResponse) IsServerEncrypted() string { + return bcsr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (bcsr BlobCreateSnapshotResponse) LastModified() time.Time { + s := bcsr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bcsr BlobCreateSnapshotResponse) RequestID() string { + return bcsr.rawResponse.Header.Get("x-ms-request-id") +} + +// Snapshot returns the value for header x-ms-snapshot. +func (bcsr BlobCreateSnapshotResponse) Snapshot() string { + return bcsr.rawResponse.Header.Get("x-ms-snapshot") +} + +// Version returns the value for header x-ms-version. +func (bcsr BlobCreateSnapshotResponse) Version() string { + return bcsr.rawResponse.Header.Get("x-ms-version") +} + +// VersionID returns the value for header x-ms-version-id. +func (bcsr BlobCreateSnapshotResponse) VersionID() string { + return bcsr.rawResponse.Header.Get("x-ms-version-id") +} + +// BlobDeleteImmutabilityPolicyResponse ... +type BlobDeleteImmutabilityPolicyResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bdipr BlobDeleteImmutabilityPolicyResponse) Response() *http.Response { + return bdipr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bdipr BlobDeleteImmutabilityPolicyResponse) StatusCode() int { + return bdipr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bdipr BlobDeleteImmutabilityPolicyResponse) Status() string { + return bdipr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bdipr BlobDeleteImmutabilityPolicyResponse) ClientRequestID() string { + return bdipr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bdipr BlobDeleteImmutabilityPolicyResponse) Date() time.Time { + s := bdipr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bdipr BlobDeleteImmutabilityPolicyResponse) ErrorCode() string { + return bdipr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bdipr BlobDeleteImmutabilityPolicyResponse) RequestID() string { + return bdipr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. 
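+// The x-ms-version header reports the REST API version the service used to
+// execute the request (a service-level behavior, noted here for convenience).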
+func (bdipr BlobDeleteImmutabilityPolicyResponse) Version() string { + return bdipr.rawResponse.Header.Get("x-ms-version") +} + +// BlobDeleteResponse ... +type BlobDeleteResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bdr BlobDeleteResponse) Response() *http.Response { + return bdr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bdr BlobDeleteResponse) StatusCode() int { + return bdr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bdr BlobDeleteResponse) Status() string { + return bdr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bdr BlobDeleteResponse) ClientRequestID() string { + return bdr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bdr BlobDeleteResponse) Date() time.Time { + s := bdr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bdr BlobDeleteResponse) ErrorCode() string { + return bdr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bdr BlobDeleteResponse) RequestID() string { + return bdr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bdr BlobDeleteResponse) Version() string { + return bdr.rawResponse.Header.Get("x-ms-version") +} + +// BlobFlatListSegment ... +type BlobFlatListSegment struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Blobs"` + BlobItems []BlobItemInternal `xml:"Blob"` +} + +// BlobGetAccountInfoResponse ... +type BlobGetAccountInfoResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bgair BlobGetAccountInfoResponse) Response() *http.Response { + return bgair.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bgair BlobGetAccountInfoResponse) StatusCode() int { + return bgair.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bgair BlobGetAccountInfoResponse) Status() string { + return bgair.rawResponse.Status +} + +// AccountKind returns the value for header x-ms-account-kind. +func (bgair BlobGetAccountInfoResponse) AccountKind() AccountKindType { + return AccountKindType(bgair.rawResponse.Header.Get("x-ms-account-kind")) +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bgair BlobGetAccountInfoResponse) ClientRequestID() string { + return bgair.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bgair BlobGetAccountInfoResponse) Date() time.Time { + s := bgair.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bgair BlobGetAccountInfoResponse) ErrorCode() string { + return bgair.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. 
+func (bgair BlobGetAccountInfoResponse) RequestID() string { + return bgair.rawResponse.Header.Get("x-ms-request-id") +} + +// SkuName returns the value for header x-ms-sku-name. +func (bgair BlobGetAccountInfoResponse) SkuName() SkuNameType { + return SkuNameType(bgair.rawResponse.Header.Get("x-ms-sku-name")) +} + +// Version returns the value for header x-ms-version. +func (bgair BlobGetAccountInfoResponse) Version() string { + return bgair.rawResponse.Header.Get("x-ms-version") +} + +// BlobGetPropertiesResponse ... +type BlobGetPropertiesResponse struct { + rawResponse *http.Response +} + +// NewMetadata returns user-defined key/value pairs. +func (bgpr BlobGetPropertiesResponse) NewMetadata() Metadata { + md := Metadata{} + for k, v := range bgpr.rawResponse.Header { + if len(k) > mdPrefixLen { + if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { + md[strings.ToLower(k[mdPrefixLen:])] = v[0] + } + } + } + return md +} + +// Response returns the raw HTTP response object. +func (bgpr BlobGetPropertiesResponse) Response() *http.Response { + return bgpr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bgpr BlobGetPropertiesResponse) StatusCode() int { + return bgpr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bgpr BlobGetPropertiesResponse) Status() string { + return bgpr.rawResponse.Status +} + +// AcceptRanges returns the value for header Accept-Ranges. +func (bgpr BlobGetPropertiesResponse) AcceptRanges() string { + return bgpr.rawResponse.Header.Get("Accept-Ranges") +} + +// AccessTier returns the value for header x-ms-access-tier. +func (bgpr BlobGetPropertiesResponse) AccessTier() string { + return bgpr.rawResponse.Header.Get("x-ms-access-tier") +} + +// AccessTierChangeTime returns the value for header x-ms-access-tier-change-time. +func (bgpr BlobGetPropertiesResponse) AccessTierChangeTime() time.Time { + s := bgpr.rawResponse.Header.Get("x-ms-access-tier-change-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// AccessTierInferred returns the value for header x-ms-access-tier-inferred. +func (bgpr BlobGetPropertiesResponse) AccessTierInferred() string { + return bgpr.rawResponse.Header.Get("x-ms-access-tier-inferred") +} + +// ArchiveStatus returns the value for header x-ms-archive-status. +func (bgpr BlobGetPropertiesResponse) ArchiveStatus() string { + return bgpr.rawResponse.Header.Get("x-ms-archive-status") +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. +func (bgpr BlobGetPropertiesResponse) BlobCommittedBlockCount() int32 { + s := bgpr.rawResponse.Header.Get("x-ms-blob-committed-block-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (bgpr BlobGetPropertiesResponse) BlobSequenceNumber() int64 { + s := bgpr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// BlobType returns the value for header x-ms-blob-type. +func (bgpr BlobGetPropertiesResponse) BlobType() BlobType { + return BlobType(bgpr.rawResponse.Header.Get("x-ms-blob-type")) +} + +// CacheControl returns the value for header Cache-Control. 
+func (bgpr BlobGetPropertiesResponse) CacheControl() string { + return bgpr.rawResponse.Header.Get("Cache-Control") +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bgpr BlobGetPropertiesResponse) ClientRequestID() string { + return bgpr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentDisposition returns the value for header Content-Disposition. +func (bgpr BlobGetPropertiesResponse) ContentDisposition() string { + return bgpr.rawResponse.Header.Get("Content-Disposition") +} + +// ContentEncoding returns the value for header Content-Encoding. +func (bgpr BlobGetPropertiesResponse) ContentEncoding() string { + return bgpr.rawResponse.Header.Get("Content-Encoding") +} + +// ContentLanguage returns the value for header Content-Language. +func (bgpr BlobGetPropertiesResponse) ContentLanguage() string { + return bgpr.rawResponse.Header.Get("Content-Language") +} + +// ContentLength returns the value for header Content-Length. +func (bgpr BlobGetPropertiesResponse) ContentLength() int64 { + s := bgpr.rawResponse.Header.Get("Content-Length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ContentMD5 returns the value for header Content-MD5. +func (bgpr BlobGetPropertiesResponse) ContentMD5() []byte { + s := bgpr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// ContentType returns the value for header Content-Type. +func (bgpr BlobGetPropertiesResponse) ContentType() string { + return bgpr.rawResponse.Header.Get("Content-Type") +} + +// CopyCompletionTime returns the value for header x-ms-copy-completion-time. +func (bgpr BlobGetPropertiesResponse) CopyCompletionTime() time.Time { + s := bgpr.rawResponse.Header.Get("x-ms-copy-completion-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// CopyID returns the value for header x-ms-copy-id. +func (bgpr BlobGetPropertiesResponse) CopyID() string { + return bgpr.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyProgress returns the value for header x-ms-copy-progress. +func (bgpr BlobGetPropertiesResponse) CopyProgress() string { + return bgpr.rawResponse.Header.Get("x-ms-copy-progress") +} + +// CopySource returns the value for header x-ms-copy-source. +func (bgpr BlobGetPropertiesResponse) CopySource() string { + return bgpr.rawResponse.Header.Get("x-ms-copy-source") +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (bgpr BlobGetPropertiesResponse) CopyStatus() CopyStatusType { + return CopyStatusType(bgpr.rawResponse.Header.Get("x-ms-copy-status")) +} + +// CopyStatusDescription returns the value for header x-ms-copy-status-description. +func (bgpr BlobGetPropertiesResponse) CopyStatusDescription() string { + return bgpr.rawResponse.Header.Get("x-ms-copy-status-description") +} + +// CreationTime returns the value for header x-ms-creation-time. +func (bgpr BlobGetPropertiesResponse) CreationTime() time.Time { + s := bgpr.rawResponse.Header.Get("x-ms-creation-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// Date returns the value for header Date. 
+func (bgpr BlobGetPropertiesResponse) Date() time.Time { + s := bgpr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// DestinationSnapshot returns the value for header x-ms-copy-destination-snapshot. +func (bgpr BlobGetPropertiesResponse) DestinationSnapshot() string { + return bgpr.rawResponse.Header.Get("x-ms-copy-destination-snapshot") +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (bgpr BlobGetPropertiesResponse) EncryptionKeySha256() string { + return bgpr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (bgpr BlobGetPropertiesResponse) EncryptionScope() string { + return bgpr.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bgpr BlobGetPropertiesResponse) ErrorCode() string { + return bgpr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bgpr BlobGetPropertiesResponse) ETag() ETag { + return ETag(bgpr.rawResponse.Header.Get("ETag")) +} + +// ExpiresOn returns the value for header x-ms-expiry-time. +func (bgpr BlobGetPropertiesResponse) ExpiresOn() time.Time { + s := bgpr.rawResponse.Header.Get("x-ms-expiry-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ImmutabilityPolicyExpiresOn returns the value for header x-ms-immutability-policy-until-date. +func (bgpr BlobGetPropertiesResponse) ImmutabilityPolicyExpiresOn() time.Time { + s := bgpr.rawResponse.Header.Get("x-ms-immutability-policy-until-date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ImmutabilityPolicyMode returns the value for header x-ms-immutability-policy-mode. +func (bgpr BlobGetPropertiesResponse) ImmutabilityPolicyMode() BlobImmutabilityPolicyModeType { + return BlobImmutabilityPolicyModeType(bgpr.rawResponse.Header.Get("x-ms-immutability-policy-mode")) +} + +// IsCurrentVersion returns the value for header x-ms-is-current-version. +func (bgpr BlobGetPropertiesResponse) IsCurrentVersion() string { + return bgpr.rawResponse.Header.Get("x-ms-is-current-version") +} + +// IsIncrementalCopy returns the value for header x-ms-incremental-copy. +func (bgpr BlobGetPropertiesResponse) IsIncrementalCopy() string { + return bgpr.rawResponse.Header.Get("x-ms-incremental-copy") +} + +// IsSealed returns the value for header x-ms-blob-sealed. +func (bgpr BlobGetPropertiesResponse) IsSealed() string { + return bgpr.rawResponse.Header.Get("x-ms-blob-sealed") +} + +// IsServerEncrypted returns the value for header x-ms-server-encrypted. +func (bgpr BlobGetPropertiesResponse) IsServerEncrypted() string { + return bgpr.rawResponse.Header.Get("x-ms-server-encrypted") +} + +// LastAccessed returns the value for header x-ms-last-access-time. +func (bgpr BlobGetPropertiesResponse) LastAccessed() time.Time { + s := bgpr.rawResponse.Header.Get("x-ms-last-access-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LastModified returns the value for header Last-Modified. 
+func (bgpr BlobGetPropertiesResponse) LastModified() time.Time { + s := bgpr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseDuration returns the value for header x-ms-lease-duration. +func (bgpr BlobGetPropertiesResponse) LeaseDuration() LeaseDurationType { + return LeaseDurationType(bgpr.rawResponse.Header.Get("x-ms-lease-duration")) +} + +// LeaseState returns the value for header x-ms-lease-state. +func (bgpr BlobGetPropertiesResponse) LeaseState() LeaseStateType { + return LeaseStateType(bgpr.rawResponse.Header.Get("x-ms-lease-state")) +} + +// LeaseStatus returns the value for header x-ms-lease-status. +func (bgpr BlobGetPropertiesResponse) LeaseStatus() LeaseStatusType { + return LeaseStatusType(bgpr.rawResponse.Header.Get("x-ms-lease-status")) +} + +// LegalHold returns the value for header x-ms-legal-hold. +func (bgpr BlobGetPropertiesResponse) LegalHold() string { + return bgpr.rawResponse.Header.Get("x-ms-legal-hold") +} + +// ObjectReplicationPolicyID returns the value for header x-ms-or-policy-id. +func (bgpr BlobGetPropertiesResponse) ObjectReplicationPolicyID() string { + return bgpr.rawResponse.Header.Get("x-ms-or-policy-id") +} + +// ObjectReplicationRules returns the value for header x-ms-or. +func (bgpr BlobGetPropertiesResponse) ObjectReplicationRules() string { + return bgpr.rawResponse.Header.Get("x-ms-or") +} + +// RehydratePriority returns the value for header x-ms-rehydrate-priority. +func (bgpr BlobGetPropertiesResponse) RehydratePriority() string { + return bgpr.rawResponse.Header.Get("x-ms-rehydrate-priority") +} + +// RequestID returns the value for header x-ms-request-id. +func (bgpr BlobGetPropertiesResponse) RequestID() string { + return bgpr.rawResponse.Header.Get("x-ms-request-id") +} + +// TagCount returns the value for header x-ms-tag-count. +func (bgpr BlobGetPropertiesResponse) TagCount() int64 { + s := bgpr.rawResponse.Header.Get("x-ms-tag-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// Version returns the value for header x-ms-version. +func (bgpr BlobGetPropertiesResponse) Version() string { + return bgpr.rawResponse.Header.Get("x-ms-version") +} + +// VersionID returns the value for header x-ms-version-id. +func (bgpr BlobGetPropertiesResponse) VersionID() string { + return bgpr.rawResponse.Header.Get("x-ms-version-id") +} + +// BlobHierarchyListSegment ... +type BlobHierarchyListSegment struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Blobs"` + BlobPrefixes []BlobPrefix `xml:"BlobPrefix"` + BlobItems []BlobItemInternal `xml:"Blob"` +} + +// BlobItemInternal - An Azure Storage blob +type BlobItemInternal struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Blob"` + Name string `xml:"Name"` + Deleted bool `xml:"Deleted"` + Snapshot string `xml:"Snapshot"` + VersionID *string `xml:"VersionId"` + IsCurrentVersion *bool `xml:"IsCurrentVersion"` + Properties BlobPropertiesInternal `xml:"Properties"` + Metadata Metadata `xml:"Metadata"` + BlobTags *BlobTags `xml:"Tags"` + ObjectReplicationMetadata map[string]string `xml:"ObjectReplicationMetadata"` + HasVersionsOnly *bool `xml:"HasVersionsOnly"` +} + +// BlobPrefix ... 
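+// A prefix stands in for the group of blobs whose names share it when a
+// listing is performed with a delimiter. Walking a hierarchical segment might
+// look like the following sketch (segment is an assumed
+// BlobHierarchyListSegment value, not defined at this point in the file):
+//
+//	for _, p := range segment.BlobPrefixes {
+//		fmt.Println("prefix:", p.Name)
+//	}
+//	for _, b := range segment.BlobItems {
+//		fmt.Println("blob:", b.Name)
+//	}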
+type BlobPrefix struct { + Name string `xml:"Name"` +} + +// BlobPropertiesInternal - Properties of a blob +type BlobPropertiesInternal struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Properties"` + CreationTime *time.Time `xml:"Creation-Time"` + LastModified time.Time `xml:"Last-Modified"` + Etag ETag `xml:"Etag"` + // ContentLength - Size in bytes + ContentLength *int64 `xml:"Content-Length"` + ContentType *string `xml:"Content-Type"` + ContentEncoding *string `xml:"Content-Encoding"` + ContentLanguage *string `xml:"Content-Language"` + ContentMD5 []byte `xml:"Content-MD5"` + ContentDisposition *string `xml:"Content-Disposition"` + CacheControl *string `xml:"Cache-Control"` + BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` + // BlobType - Possible values include: 'BlobBlockBlob', 'BlobPageBlob', 'BlobAppendBlob', 'BlobNone' + BlobType BlobType `xml:"BlobType"` + // LeaseStatus - Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked', 'LeaseStatusNone' + LeaseStatus LeaseStatusType `xml:"LeaseStatus"` + // LeaseState - Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken', 'LeaseStateNone' + LeaseState LeaseStateType `xml:"LeaseState"` + // LeaseDuration - Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed', 'LeaseDurationNone' + LeaseDuration LeaseDurationType `xml:"LeaseDuration"` + CopyID *string `xml:"CopyId"` + // CopyStatus - Possible values include: 'CopyStatusPending', 'CopyStatusSuccess', 'CopyStatusAborted', 'CopyStatusFailed', 'CopyStatusNone' + CopyStatus CopyStatusType `xml:"CopyStatus"` + CopySource *string `xml:"CopySource"` + CopyProgress *string `xml:"CopyProgress"` + CopyCompletionTime *time.Time `xml:"CopyCompletionTime"` + CopyStatusDescription *string `xml:"CopyStatusDescription"` + ServerEncrypted *bool `xml:"ServerEncrypted"` + IncrementalCopy *bool `xml:"IncrementalCopy"` + DestinationSnapshot *string `xml:"DestinationSnapshot"` + DeletedTime *time.Time `xml:"DeletedTime"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + // AccessTier - Possible values include: 'AccessTierP4', 'AccessTierP6', 'AccessTierP10', 'AccessTierP15', 'AccessTierP20', 'AccessTierP30', 'AccessTierP40', 'AccessTierP50', 'AccessTierP60', 'AccessTierP70', 'AccessTierP80', 'AccessTierHot', 'AccessTierCool', 'AccessTierArchive', 'AccessTierNone' + AccessTier AccessTierType `xml:"AccessTier"` + AccessTierInferred *bool `xml:"AccessTierInferred"` + // ArchiveStatus - Possible values include: 'ArchiveStatusRehydratePendingToHot', 'ArchiveStatusRehydratePendingToCool', 'ArchiveStatusNone' + ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"` + CustomerProvidedKeySha256 *string `xml:"CustomerProvidedKeySha256"` + // EncryptionScope - The name of the encryption scope under which the blob is encrypted. 
+ EncryptionScope *string `xml:"EncryptionScope"` + AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"` + TagCount *int32 `xml:"TagCount"` + ExpiresOn *time.Time `xml:"Expiry-Time"` + IsSealed *bool `xml:"Sealed"` + // RehydratePriority - Possible values include: 'RehydratePriorityHigh', 'RehydratePriorityStandard', 'RehydratePriorityNone' + RehydratePriority RehydratePriorityType `xml:"RehydratePriority"` + LastAccessedOn *time.Time `xml:"LastAccessTime"` + ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"` + // ImmutabilityPolicyMode - Possible values include: 'BlobImmutabilityPolicyModeMutable', 'BlobImmutabilityPolicyModeUnlocked', 'BlobImmutabilityPolicyModeLocked', 'BlobImmutabilityPolicyModeNone' + ImmutabilityPolicyMode BlobImmutabilityPolicyModeType `xml:"ImmutabilityPolicyMode"` + LegalHold *bool `xml:"LegalHold"` + Owner *string `xml:"Owner"` + Group *string `xml:"Group"` + Permissions *string `xml:"Permissions"` + ACL *string `xml:"Acl"` +} + +// MarshalXML implements the xml.Marshaler interface for BlobPropertiesInternal. +func (bpi BlobPropertiesInternal) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + bpi2 := (*blobPropertiesInternal)(unsafe.Pointer(&bpi)) + return e.EncodeElement(*bpi2, start) +} + +// UnmarshalXML implements the xml.Unmarshaler interface for BlobPropertiesInternal. +func (bpi *BlobPropertiesInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + bpi2 := (*blobPropertiesInternal)(unsafe.Pointer(bpi)) + return d.DecodeElement(bpi2, &start) +} + +// BlobReleaseLeaseResponse ... +type BlobReleaseLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (brlr BlobReleaseLeaseResponse) Response() *http.Response { + return brlr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (brlr BlobReleaseLeaseResponse) StatusCode() int { + return brlr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (brlr BlobReleaseLeaseResponse) Status() string { + return brlr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (brlr BlobReleaseLeaseResponse) ClientRequestID() string { + return brlr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (brlr BlobReleaseLeaseResponse) Date() time.Time { + s := brlr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (brlr BlobReleaseLeaseResponse) ErrorCode() string { + return brlr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (brlr BlobReleaseLeaseResponse) ETag() ETag { + return ETag(brlr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (brlr BlobReleaseLeaseResponse) LastModified() time.Time { + s := brlr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (brlr BlobReleaseLeaseResponse) RequestID() string { + return brlr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. 
+func (brlr BlobReleaseLeaseResponse) Version() string { + return brlr.rawResponse.Header.Get("x-ms-version") +} + +// BlobRenewLeaseResponse ... +type BlobRenewLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (brlr BlobRenewLeaseResponse) Response() *http.Response { + return brlr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (brlr BlobRenewLeaseResponse) StatusCode() int { + return brlr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (brlr BlobRenewLeaseResponse) Status() string { + return brlr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (brlr BlobRenewLeaseResponse) ClientRequestID() string { + return brlr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (brlr BlobRenewLeaseResponse) Date() time.Time { + s := brlr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (brlr BlobRenewLeaseResponse) ErrorCode() string { + return brlr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (brlr BlobRenewLeaseResponse) ETag() ETag { + return ETag(brlr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (brlr BlobRenewLeaseResponse) LastModified() time.Time { + s := brlr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (brlr BlobRenewLeaseResponse) LeaseID() string { + return brlr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (brlr BlobRenewLeaseResponse) RequestID() string { + return brlr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (brlr BlobRenewLeaseResponse) Version() string { + return brlr.rawResponse.Header.Get("x-ms-version") +} + +// BlobSetExpiryResponse ... +type BlobSetExpiryResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bser BlobSetExpiryResponse) Response() *http.Response { + return bser.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bser BlobSetExpiryResponse) StatusCode() int { + return bser.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bser BlobSetExpiryResponse) Status() string { + return bser.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bser BlobSetExpiryResponse) ClientRequestID() string { + return bser.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bser BlobSetExpiryResponse) Date() time.Time { + s := bser.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. 
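+// The header carries the storage service's error code string (for example
+// "BlobNotFound") and is generally empty on success, so checking it is cheap
+// (illustrative; resp is a placeholder name):
+//
+//	if code := resp.ErrorCode(); code != "" {
+//		log.Printf("service error code: %s", code)
+//	}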
+func (bser BlobSetExpiryResponse) ErrorCode() string { + return bser.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bser BlobSetExpiryResponse) ETag() ETag { + return ETag(bser.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bser BlobSetExpiryResponse) LastModified() time.Time { + s := bser.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bser BlobSetExpiryResponse) RequestID() string { + return bser.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bser BlobSetExpiryResponse) Version() string { + return bser.rawResponse.Header.Get("x-ms-version") +} + +// BlobSetHTTPHeadersResponse ... +type BlobSetHTTPHeadersResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bshhr BlobSetHTTPHeadersResponse) Response() *http.Response { + return bshhr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bshhr BlobSetHTTPHeadersResponse) StatusCode() int { + return bshhr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bshhr BlobSetHTTPHeadersResponse) Status() string { + return bshhr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (bshhr BlobSetHTTPHeadersResponse) BlobSequenceNumber() int64 { + s := bshhr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bshhr BlobSetHTTPHeadersResponse) ClientRequestID() string { + return bshhr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bshhr BlobSetHTTPHeadersResponse) Date() time.Time { + s := bshhr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bshhr BlobSetHTTPHeadersResponse) ErrorCode() string { + return bshhr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bshhr BlobSetHTTPHeadersResponse) ETag() ETag { + return ETag(bshhr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bshhr BlobSetHTTPHeadersResponse) LastModified() time.Time { + s := bshhr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bshhr BlobSetHTTPHeadersResponse) RequestID() string { + return bshhr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bshhr BlobSetHTTPHeadersResponse) Version() string { + return bshhr.rawResponse.Header.Get("x-ms-version") +} + +// BlobSetImmutabilityPolicyResponse ... +type BlobSetImmutabilityPolicyResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. 
+func (bsipr BlobSetImmutabilityPolicyResponse) Response() *http.Response { + return bsipr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bsipr BlobSetImmutabilityPolicyResponse) StatusCode() int { + return bsipr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bsipr BlobSetImmutabilityPolicyResponse) Status() string { + return bsipr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bsipr BlobSetImmutabilityPolicyResponse) ClientRequestID() string { + return bsipr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bsipr BlobSetImmutabilityPolicyResponse) Date() time.Time { + s := bsipr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bsipr BlobSetImmutabilityPolicyResponse) ErrorCode() string { + return bsipr.rawResponse.Header.Get("x-ms-error-code") +} + +// ImmutabilityPolicyExpiry returns the value for header x-ms-immutability-policy-until-date. +func (bsipr BlobSetImmutabilityPolicyResponse) ImmutabilityPolicyExpiry() time.Time { + s := bsipr.rawResponse.Header.Get("x-ms-immutability-policy-until-date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ImmutabilityPolicyMode returns the value for header x-ms-immutability-policy-mode. +func (bsipr BlobSetImmutabilityPolicyResponse) ImmutabilityPolicyMode() BlobImmutabilityPolicyModeType { + return BlobImmutabilityPolicyModeType(bsipr.rawResponse.Header.Get("x-ms-immutability-policy-mode")) +} + +// RequestID returns the value for header x-ms-request-id. +func (bsipr BlobSetImmutabilityPolicyResponse) RequestID() string { + return bsipr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bsipr BlobSetImmutabilityPolicyResponse) Version() string { + return bsipr.rawResponse.Header.Get("x-ms-version") +} + +// BlobSetLegalHoldResponse ... +type BlobSetLegalHoldResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bslhr BlobSetLegalHoldResponse) Response() *http.Response { + return bslhr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bslhr BlobSetLegalHoldResponse) StatusCode() int { + return bslhr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bslhr BlobSetLegalHoldResponse) Status() string { + return bslhr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bslhr BlobSetLegalHoldResponse) ClientRequestID() string { + return bslhr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bslhr BlobSetLegalHoldResponse) Date() time.Time { + s := bslhr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. 
+func (bslhr BlobSetLegalHoldResponse) ErrorCode() string { + return bslhr.rawResponse.Header.Get("x-ms-error-code") +} + +// LegalHold returns the value for header x-ms-legal-hold. +func (bslhr BlobSetLegalHoldResponse) LegalHold() string { + return bslhr.rawResponse.Header.Get("x-ms-legal-hold") +} + +// RequestID returns the value for header x-ms-request-id. +func (bslhr BlobSetLegalHoldResponse) RequestID() string { + return bslhr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bslhr BlobSetLegalHoldResponse) Version() string { + return bslhr.rawResponse.Header.Get("x-ms-version") +} + +// BlobSetMetadataResponse ... +type BlobSetMetadataResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bsmr BlobSetMetadataResponse) Response() *http.Response { + return bsmr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bsmr BlobSetMetadataResponse) StatusCode() int { + return bsmr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bsmr BlobSetMetadataResponse) Status() string { + return bsmr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bsmr BlobSetMetadataResponse) ClientRequestID() string { + return bsmr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bsmr BlobSetMetadataResponse) Date() time.Time { + s := bsmr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (bsmr BlobSetMetadataResponse) EncryptionKeySha256() string { + return bsmr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (bsmr BlobSetMetadataResponse) EncryptionScope() string { + return bsmr.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bsmr BlobSetMetadataResponse) ErrorCode() string { + return bsmr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bsmr BlobSetMetadataResponse) ETag() ETag { + return ETag(bsmr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (bsmr BlobSetMetadataResponse) IsServerEncrypted() string { + return bsmr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (bsmr BlobSetMetadataResponse) LastModified() time.Time { + s := bsmr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bsmr BlobSetMetadataResponse) RequestID() string { + return bsmr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bsmr BlobSetMetadataResponse) Version() string { + return bsmr.rawResponse.Header.Get("x-ms-version") +} + +// VersionID returns the value for header x-ms-version-id. 
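+// The x-ms-version-id header is only populated when blob versioning is enabled
+// on the storage account; otherwise the accessor returns the empty string (a
+// service behavior, stated here as an assumption about typical deployments).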
+func (bsmr BlobSetMetadataResponse) VersionID() string { + return bsmr.rawResponse.Header.Get("x-ms-version-id") +} + +// BlobSetTagsResponse ... +type BlobSetTagsResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bstr BlobSetTagsResponse) Response() *http.Response { + return bstr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bstr BlobSetTagsResponse) StatusCode() int { + return bstr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bstr BlobSetTagsResponse) Status() string { + return bstr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bstr BlobSetTagsResponse) ClientRequestID() string { + return bstr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bstr BlobSetTagsResponse) Date() time.Time { + s := bstr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bstr BlobSetTagsResponse) ErrorCode() string { + return bstr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bstr BlobSetTagsResponse) RequestID() string { + return bstr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bstr BlobSetTagsResponse) Version() string { + return bstr.rawResponse.Header.Get("x-ms-version") +} + +// BlobSetTierResponse ... +type BlobSetTierResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bstr BlobSetTierResponse) Response() *http.Response { + return bstr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bstr BlobSetTierResponse) StatusCode() int { + return bstr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bstr BlobSetTierResponse) Status() string { + return bstr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bstr BlobSetTierResponse) ClientRequestID() string { + return bstr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bstr BlobSetTierResponse) ErrorCode() string { + return bstr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bstr BlobSetTierResponse) RequestID() string { + return bstr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bstr BlobSetTierResponse) Version() string { + return bstr.rawResponse.Header.Get("x-ms-version") +} + +// BlobStartCopyFromURLResponse ... +type BlobStartCopyFromURLResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bscfur BlobStartCopyFromURLResponse) Response() *http.Response { + return bscfur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bscfur BlobStartCopyFromURLResponse) StatusCode() int { + return bscfur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". 
+func (bscfur BlobStartCopyFromURLResponse) Status() string { + return bscfur.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bscfur BlobStartCopyFromURLResponse) ClientRequestID() string { + return bscfur.rawResponse.Header.Get("x-ms-client-request-id") +} + +// CopyID returns the value for header x-ms-copy-id. +func (bscfur BlobStartCopyFromURLResponse) CopyID() string { + return bscfur.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (bscfur BlobStartCopyFromURLResponse) CopyStatus() CopyStatusType { + return CopyStatusType(bscfur.rawResponse.Header.Get("x-ms-copy-status")) +} + +// Date returns the value for header Date. +func (bscfur BlobStartCopyFromURLResponse) Date() time.Time { + s := bscfur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bscfur BlobStartCopyFromURLResponse) ErrorCode() string { + return bscfur.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bscfur BlobStartCopyFromURLResponse) ETag() ETag { + return ETag(bscfur.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bscfur BlobStartCopyFromURLResponse) LastModified() time.Time { + s := bscfur.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bscfur BlobStartCopyFromURLResponse) RequestID() string { + return bscfur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bscfur BlobStartCopyFromURLResponse) Version() string { + return bscfur.rawResponse.Header.Get("x-ms-version") +} + +// VersionID returns the value for header x-ms-version-id. +func (bscfur BlobStartCopyFromURLResponse) VersionID() string { + return bscfur.rawResponse.Header.Get("x-ms-version-id") +} + +// BlobTag ... +type BlobTag struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Tag"` + Key string `xml:"Key"` + Value string `xml:"Value"` +} + +// BlobTags - Blob tags +type BlobTags struct { + rawResponse *http.Response + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Tags"` + BlobTagSet []BlobTag `xml:"TagSet>Tag"` +} + +// Response returns the raw HTTP response object. +func (bt BlobTags) Response() *http.Response { + return bt.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bt BlobTags) StatusCode() int { + return bt.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bt BlobTags) Status() string { + return bt.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bt BlobTags) ClientRequestID() string { + return bt.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. 
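+// The header is parsed as RFC1123; a missing or malformed value is reported as the zero time.Time rather than an error.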
+func (bt BlobTags) Date() time.Time { + s := bt.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bt BlobTags) ErrorCode() string { + return bt.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bt BlobTags) RequestID() string { + return bt.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bt BlobTags) Version() string { + return bt.rawResponse.Header.Get("x-ms-version") +} + +// BlobUndeleteResponse ... +type BlobUndeleteResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bur BlobUndeleteResponse) Response() *http.Response { + return bur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bur BlobUndeleteResponse) StatusCode() int { + return bur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bur BlobUndeleteResponse) Status() string { + return bur.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bur BlobUndeleteResponse) ClientRequestID() string { + return bur.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bur BlobUndeleteResponse) Date() time.Time { + s := bur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bur BlobUndeleteResponse) ErrorCode() string { + return bur.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bur BlobUndeleteResponse) RequestID() string { + return bur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bur BlobUndeleteResponse) Version() string { + return bur.rawResponse.Header.Get("x-ms-version") +} + +// Block - Represents a single block in a block blob. It describes the block's ID and size. +type Block struct { + // Name - The base64 encoded block ID. + Name string `xml:"Name"` + // Size - The block size in bytes. + Size int64 `xml:"Size"` +} + +// BlockBlobCommitBlockListResponse ... +type BlockBlobCommitBlockListResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bbcblr BlockBlobCommitBlockListResponse) Response() *http.Response { + return bbcblr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bbcblr BlockBlobCommitBlockListResponse) StatusCode() int { + return bbcblr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bbcblr BlockBlobCommitBlockListResponse) Status() string { + return bbcblr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bbcblr BlockBlobCommitBlockListResponse) ClientRequestID() string { + return bbcblr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. 
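+// The header value is base64-decoded; a missing or undecodable value yields nil.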
+func (bbcblr BlockBlobCommitBlockListResponse) ContentMD5() []byte { + s := bbcblr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (bbcblr BlockBlobCommitBlockListResponse) Date() time.Time { + s := bbcblr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (bbcblr BlockBlobCommitBlockListResponse) EncryptionKeySha256() string { + return bbcblr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (bbcblr BlockBlobCommitBlockListResponse) EncryptionScope() string { + return bbcblr.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bbcblr BlockBlobCommitBlockListResponse) ErrorCode() string { + return bbcblr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bbcblr BlockBlobCommitBlockListResponse) ETag() ETag { + return ETag(bbcblr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (bbcblr BlockBlobCommitBlockListResponse) IsServerEncrypted() string { + return bbcblr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (bbcblr BlockBlobCommitBlockListResponse) LastModified() time.Time { + s := bbcblr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bbcblr BlockBlobCommitBlockListResponse) RequestID() string { + return bbcblr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bbcblr BlockBlobCommitBlockListResponse) Version() string { + return bbcblr.rawResponse.Header.Get("x-ms-version") +} + +// VersionID returns the value for header x-ms-version-id. +func (bbcblr BlockBlobCommitBlockListResponse) VersionID() string { + return bbcblr.rawResponse.Header.Get("x-ms-version-id") +} + +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (bbcblr BlockBlobCommitBlockListResponse) XMsContentCrc64() []byte { + s := bbcblr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// BlockBlobPutBlobFromURLResponse ... +type BlockBlobPutBlobFromURLResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bbpbfur BlockBlobPutBlobFromURLResponse) Response() *http.Response { + return bbpbfur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bbpbfur BlockBlobPutBlobFromURLResponse) StatusCode() int { + return bbpbfur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bbpbfur BlockBlobPutBlobFromURLResponse) Status() string { + return bbpbfur.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. 
+func (bbpbfur BlockBlobPutBlobFromURLResponse) ClientRequestID() string { + return bbpbfur.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. +func (bbpbfur BlockBlobPutBlobFromURLResponse) ContentMD5() []byte { + s := bbpbfur.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (bbpbfur BlockBlobPutBlobFromURLResponse) Date() time.Time { + s := bbpbfur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (bbpbfur BlockBlobPutBlobFromURLResponse) EncryptionKeySha256() string { + return bbpbfur.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (bbpbfur BlockBlobPutBlobFromURLResponse) EncryptionScope() string { + return bbpbfur.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bbpbfur BlockBlobPutBlobFromURLResponse) ErrorCode() string { + return bbpbfur.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bbpbfur BlockBlobPutBlobFromURLResponse) ETag() ETag { + return ETag(bbpbfur.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (bbpbfur BlockBlobPutBlobFromURLResponse) IsServerEncrypted() string { + return bbpbfur.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (bbpbfur BlockBlobPutBlobFromURLResponse) LastModified() time.Time { + s := bbpbfur.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bbpbfur BlockBlobPutBlobFromURLResponse) RequestID() string { + return bbpbfur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bbpbfur BlockBlobPutBlobFromURLResponse) Version() string { + return bbpbfur.rawResponse.Header.Get("x-ms-version") +} + +// VersionID returns the value for header x-ms-version-id. +func (bbpbfur BlockBlobPutBlobFromURLResponse) VersionID() string { + return bbpbfur.rawResponse.Header.Get("x-ms-version-id") +} + +// BlockBlobStageBlockFromURLResponse ... +type BlockBlobStageBlockFromURLResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bbsbfur BlockBlobStageBlockFromURLResponse) Response() *http.Response { + return bbsbfur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bbsbfur BlockBlobStageBlockFromURLResponse) StatusCode() int { + return bbsbfur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bbsbfur BlockBlobStageBlockFromURLResponse) Status() string { + return bbsbfur.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. 
+func (bbsbfur BlockBlobStageBlockFromURLResponse) ClientRequestID() string { + return bbsbfur.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. +func (bbsbfur BlockBlobStageBlockFromURLResponse) ContentMD5() []byte { + s := bbsbfur.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (bbsbfur BlockBlobStageBlockFromURLResponse) Date() time.Time { + s := bbsbfur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (bbsbfur BlockBlobStageBlockFromURLResponse) EncryptionKeySha256() string { + return bbsbfur.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (bbsbfur BlockBlobStageBlockFromURLResponse) EncryptionScope() string { + return bbsbfur.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bbsbfur BlockBlobStageBlockFromURLResponse) ErrorCode() string { + return bbsbfur.rawResponse.Header.Get("x-ms-error-code") +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (bbsbfur BlockBlobStageBlockFromURLResponse) IsServerEncrypted() string { + return bbsbfur.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// RequestID returns the value for header x-ms-request-id. +func (bbsbfur BlockBlobStageBlockFromURLResponse) RequestID() string { + return bbsbfur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bbsbfur BlockBlobStageBlockFromURLResponse) Version() string { + return bbsbfur.rawResponse.Header.Get("x-ms-version") +} + +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (bbsbfur BlockBlobStageBlockFromURLResponse) XMsContentCrc64() []byte { + s := bbsbfur.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// BlockBlobStageBlockResponse ... +type BlockBlobStageBlockResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bbsbr BlockBlobStageBlockResponse) Response() *http.Response { + return bbsbr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bbsbr BlockBlobStageBlockResponse) StatusCode() int { + return bbsbr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bbsbr BlockBlobStageBlockResponse) Status() string { + return bbsbr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bbsbr BlockBlobStageBlockResponse) ClientRequestID() string { + return bbsbr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. +func (bbsbr BlockBlobStageBlockResponse) ContentMD5() []byte { + s := bbsbr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. 
+func (bbsbr BlockBlobStageBlockResponse) Date() time.Time { + s := bbsbr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (bbsbr BlockBlobStageBlockResponse) EncryptionKeySha256() string { + return bbsbr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (bbsbr BlockBlobStageBlockResponse) EncryptionScope() string { + return bbsbr.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bbsbr BlockBlobStageBlockResponse) ErrorCode() string { + return bbsbr.rawResponse.Header.Get("x-ms-error-code") +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (bbsbr BlockBlobStageBlockResponse) IsServerEncrypted() string { + return bbsbr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// RequestID returns the value for header x-ms-request-id. +func (bbsbr BlockBlobStageBlockResponse) RequestID() string { + return bbsbr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bbsbr BlockBlobStageBlockResponse) Version() string { + return bbsbr.rawResponse.Header.Get("x-ms-version") +} + +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (bbsbr BlockBlobStageBlockResponse) XMsContentCrc64() []byte { + s := bbsbr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// BlockBlobUploadResponse ... +type BlockBlobUploadResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bbur BlockBlobUploadResponse) Response() *http.Response { + return bbur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bbur BlockBlobUploadResponse) StatusCode() int { + return bbur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bbur BlockBlobUploadResponse) Status() string { + return bbur.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bbur BlockBlobUploadResponse) ClientRequestID() string { + return bbur.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. +func (bbur BlockBlobUploadResponse) ContentMD5() []byte { + s := bbur.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (bbur BlockBlobUploadResponse) Date() time.Time { + s := bbur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (bbur BlockBlobUploadResponse) EncryptionKeySha256() string { + return bbur.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. 
+func (bbur BlockBlobUploadResponse) EncryptionScope() string { + return bbur.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bbur BlockBlobUploadResponse) ErrorCode() string { + return bbur.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bbur BlockBlobUploadResponse) ETag() ETag { + return ETag(bbur.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (bbur BlockBlobUploadResponse) IsServerEncrypted() string { + return bbur.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (bbur BlockBlobUploadResponse) LastModified() time.Time { + s := bbur.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bbur BlockBlobUploadResponse) RequestID() string { + return bbur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bbur BlockBlobUploadResponse) Version() string { + return bbur.rawResponse.Header.Get("x-ms-version") +} + +// VersionID returns the value for header x-ms-version-id. +func (bbur BlockBlobUploadResponse) VersionID() string { + return bbur.rawResponse.Header.Get("x-ms-version-id") +} + +// BlockList ... +type BlockList struct { + rawResponse *http.Response + CommittedBlocks []Block `xml:"CommittedBlocks>Block"` + UncommittedBlocks []Block `xml:"UncommittedBlocks>Block"` +} + +// Response returns the raw HTTP response object. +func (bl BlockList) Response() *http.Response { + return bl.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bl BlockList) StatusCode() int { + return bl.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bl BlockList) Status() string { + return bl.rawResponse.Status +} + +// BlobContentLength returns the value for header x-ms-blob-content-length. +func (bl BlockList) BlobContentLength() int64 { + s := bl.rawResponse.Header.Get("x-ms-blob-content-length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bl BlockList) ClientRequestID() string { + return bl.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentType returns the value for header Content-Type. +func (bl BlockList) ContentType() string { + return bl.rawResponse.Header.Get("Content-Type") +} + +// Date returns the value for header Date. +func (bl BlockList) Date() time.Time { + s := bl.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bl BlockList) ErrorCode() string { + return bl.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bl BlockList) ETag() ETag { + return ETag(bl.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. 
+func (bl BlockList) LastModified() time.Time { + s := bl.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bl BlockList) RequestID() string { + return bl.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bl BlockList) Version() string { + return bl.rawResponse.Header.Get("x-ms-version") +} + +// BlockLookupList ... +type BlockLookupList struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"BlockList"` + Committed []string `xml:"Committed"` + Uncommitted []string `xml:"Uncommitted"` + Latest []string `xml:"Latest"` +} + +// ClearRange ... +type ClearRange struct { + Start int64 `xml:"Start"` + End int64 `xml:"End"` +} + +// ContainerAcquireLeaseResponse ... +type ContainerAcquireLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (calr ContainerAcquireLeaseResponse) Response() *http.Response { + return calr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (calr ContainerAcquireLeaseResponse) StatusCode() int { + return calr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (calr ContainerAcquireLeaseResponse) Status() string { + return calr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (calr ContainerAcquireLeaseResponse) ClientRequestID() string { + return calr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (calr ContainerAcquireLeaseResponse) Date() time.Time { + s := calr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (calr ContainerAcquireLeaseResponse) ErrorCode() string { + return calr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (calr ContainerAcquireLeaseResponse) ETag() ETag { + return ETag(calr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (calr ContainerAcquireLeaseResponse) LastModified() time.Time { + s := calr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (calr ContainerAcquireLeaseResponse) LeaseID() string { + return calr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (calr ContainerAcquireLeaseResponse) RequestID() string { + return calr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (calr ContainerAcquireLeaseResponse) Version() string { + return calr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerBreakLeaseResponse ... +type ContainerBreakLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. 
+func (cblr ContainerBreakLeaseResponse) Response() *http.Response { + return cblr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (cblr ContainerBreakLeaseResponse) StatusCode() int { + return cblr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cblr ContainerBreakLeaseResponse) Status() string { + return cblr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (cblr ContainerBreakLeaseResponse) ClientRequestID() string { + return cblr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (cblr ContainerBreakLeaseResponse) Date() time.Time { + s := cblr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (cblr ContainerBreakLeaseResponse) ErrorCode() string { + return cblr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (cblr ContainerBreakLeaseResponse) ETag() ETag { + return ETag(cblr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (cblr ContainerBreakLeaseResponse) LastModified() time.Time { + s := cblr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseTime returns the value for header x-ms-lease-time. +func (cblr ContainerBreakLeaseResponse) LeaseTime() int32 { + s := cblr.rawResponse.Header.Get("x-ms-lease-time") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// RequestID returns the value for header x-ms-request-id. +func (cblr ContainerBreakLeaseResponse) RequestID() string { + return cblr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (cblr ContainerBreakLeaseResponse) Version() string { + return cblr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerChangeLeaseResponse ... +type ContainerChangeLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (cclr ContainerChangeLeaseResponse) Response() *http.Response { + return cclr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (cclr ContainerChangeLeaseResponse) StatusCode() int { + return cclr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cclr ContainerChangeLeaseResponse) Status() string { + return cclr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (cclr ContainerChangeLeaseResponse) ClientRequestID() string { + return cclr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (cclr ContainerChangeLeaseResponse) Date() time.Time { + s := cclr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. 
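+// The service generally sets x-ms-error-code only on failed requests, so the result is empty for successful calls.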
+func (cclr ContainerChangeLeaseResponse) ErrorCode() string { + return cclr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (cclr ContainerChangeLeaseResponse) ETag() ETag { + return ETag(cclr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (cclr ContainerChangeLeaseResponse) LastModified() time.Time { + s := cclr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (cclr ContainerChangeLeaseResponse) LeaseID() string { + return cclr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (cclr ContainerChangeLeaseResponse) RequestID() string { + return cclr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (cclr ContainerChangeLeaseResponse) Version() string { + return cclr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerCreateResponse ... +type ContainerCreateResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (ccr ContainerCreateResponse) Response() *http.Response { + return ccr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (ccr ContainerCreateResponse) StatusCode() int { + return ccr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (ccr ContainerCreateResponse) Status() string { + return ccr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (ccr ContainerCreateResponse) ClientRequestID() string { + return ccr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (ccr ContainerCreateResponse) Date() time.Time { + s := ccr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (ccr ContainerCreateResponse) ErrorCode() string { + return ccr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (ccr ContainerCreateResponse) ETag() ETag { + return ETag(ccr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (ccr ContainerCreateResponse) LastModified() time.Time { + s := ccr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (ccr ContainerCreateResponse) RequestID() string { + return ccr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (ccr ContainerCreateResponse) Version() string { + return ccr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerDeleteResponse ... +type ContainerDeleteResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (cdr ContainerDeleteResponse) Response() *http.Response { + return cdr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. 
+func (cdr ContainerDeleteResponse) StatusCode() int { + return cdr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cdr ContainerDeleteResponse) Status() string { + return cdr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (cdr ContainerDeleteResponse) ClientRequestID() string { + return cdr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (cdr ContainerDeleteResponse) Date() time.Time { + s := cdr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (cdr ContainerDeleteResponse) ErrorCode() string { + return cdr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (cdr ContainerDeleteResponse) RequestID() string { + return cdr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (cdr ContainerDeleteResponse) Version() string { + return cdr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerGetAccountInfoResponse ... +type ContainerGetAccountInfoResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (cgair ContainerGetAccountInfoResponse) Response() *http.Response { + return cgair.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (cgair ContainerGetAccountInfoResponse) StatusCode() int { + return cgair.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cgair ContainerGetAccountInfoResponse) Status() string { + return cgair.rawResponse.Status +} + +// AccountKind returns the value for header x-ms-account-kind. +func (cgair ContainerGetAccountInfoResponse) AccountKind() AccountKindType { + return AccountKindType(cgair.rawResponse.Header.Get("x-ms-account-kind")) +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (cgair ContainerGetAccountInfoResponse) ClientRequestID() string { + return cgair.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (cgair ContainerGetAccountInfoResponse) Date() time.Time { + s := cgair.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (cgair ContainerGetAccountInfoResponse) ErrorCode() string { + return cgair.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (cgair ContainerGetAccountInfoResponse) RequestID() string { + return cgair.rawResponse.Header.Get("x-ms-request-id") +} + +// SkuName returns the value for header x-ms-sku-name. +func (cgair ContainerGetAccountInfoResponse) SkuName() SkuNameType { + return SkuNameType(cgair.rawResponse.Header.Get("x-ms-sku-name")) +} + +// Version returns the value for header x-ms-version. +func (cgair ContainerGetAccountInfoResponse) Version() string { + return cgair.rawResponse.Header.Get("x-ms-version") +} + +// ContainerGetPropertiesResponse ... +type ContainerGetPropertiesResponse struct { + rawResponse *http.Response +} + +// NewMetadata returns user-defined key/value pairs. 
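+// Keys are the response headers carrying the metadata prefix (mdPrefix), returned with the prefix stripped and lower-cased; only the first value of each header is kept.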
+func (cgpr ContainerGetPropertiesResponse) NewMetadata() Metadata { + md := Metadata{} + for k, v := range cgpr.rawResponse.Header { + if len(k) > mdPrefixLen { + if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { + md[strings.ToLower(k[mdPrefixLen:])] = v[0] + } + } + } + return md +} + +// Response returns the raw HTTP response object. +func (cgpr ContainerGetPropertiesResponse) Response() *http.Response { + return cgpr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (cgpr ContainerGetPropertiesResponse) StatusCode() int { + return cgpr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cgpr ContainerGetPropertiesResponse) Status() string { + return cgpr.rawResponse.Status +} + +// BlobPublicAccess returns the value for header x-ms-blob-public-access. +func (cgpr ContainerGetPropertiesResponse) BlobPublicAccess() PublicAccessType { + return PublicAccessType(cgpr.rawResponse.Header.Get("x-ms-blob-public-access")) +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (cgpr ContainerGetPropertiesResponse) ClientRequestID() string { + return cgpr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (cgpr ContainerGetPropertiesResponse) Date() time.Time { + s := cgpr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// DefaultEncryptionScope returns the value for header x-ms-default-encryption-scope. +func (cgpr ContainerGetPropertiesResponse) DefaultEncryptionScope() string { + return cgpr.rawResponse.Header.Get("x-ms-default-encryption-scope") +} + +// DenyEncryptionScopeOverride returns the value for header x-ms-deny-encryption-scope-override. +func (cgpr ContainerGetPropertiesResponse) DenyEncryptionScopeOverride() string { + return cgpr.rawResponse.Header.Get("x-ms-deny-encryption-scope-override") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (cgpr ContainerGetPropertiesResponse) ErrorCode() string { + return cgpr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (cgpr ContainerGetPropertiesResponse) ETag() ETag { + return ETag(cgpr.rawResponse.Header.Get("ETag")) +} + +// HasImmutabilityPolicy returns the value for header x-ms-has-immutability-policy. +func (cgpr ContainerGetPropertiesResponse) HasImmutabilityPolicy() string { + return cgpr.rawResponse.Header.Get("x-ms-has-immutability-policy") +} + +// HasLegalHold returns the value for header x-ms-has-legal-hold. +func (cgpr ContainerGetPropertiesResponse) HasLegalHold() string { + return cgpr.rawResponse.Header.Get("x-ms-has-legal-hold") +} + +// IsImmutableStorageWithVersioningEnabled returns the value for header x-ms-immutable-storage-with-versioning-enabled. +func (cgpr ContainerGetPropertiesResponse) IsImmutableStorageWithVersioningEnabled() string { + return cgpr.rawResponse.Header.Get("x-ms-immutable-storage-with-versioning-enabled") +} + +// LastModified returns the value for header Last-Modified. +func (cgpr ContainerGetPropertiesResponse) LastModified() time.Time { + s := cgpr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseDuration returns the value for header x-ms-lease-duration. 
+func (cgpr ContainerGetPropertiesResponse) LeaseDuration() LeaseDurationType {
+	return LeaseDurationType(cgpr.rawResponse.Header.Get("x-ms-lease-duration"))
+}
+
+// LeaseState returns the value for header x-ms-lease-state.
+func (cgpr ContainerGetPropertiesResponse) LeaseState() LeaseStateType {
+	return LeaseStateType(cgpr.rawResponse.Header.Get("x-ms-lease-state"))
+}
+
+// LeaseStatus returns the value for header x-ms-lease-status.
+func (cgpr ContainerGetPropertiesResponse) LeaseStatus() LeaseStatusType {
+	return LeaseStatusType(cgpr.rawResponse.Header.Get("x-ms-lease-status"))
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (cgpr ContainerGetPropertiesResponse) RequestID() string {
+	return cgpr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (cgpr ContainerGetPropertiesResponse) Version() string {
+	return cgpr.rawResponse.Header.Get("x-ms-version")
+}
+
+// ContainerItem - An Azure Storage container.
+type ContainerItem struct {
+	// XMLName is used for marshalling and is subject to removal in a future release.
+	XMLName xml.Name `xml:"Container"`
+	Name string `xml:"Name"`
+	Deleted *bool `xml:"Deleted"`
+	Version *string `xml:"Version"`
+	Properties ContainerProperties `xml:"Properties"`
+	Metadata Metadata `xml:"Metadata"`
+}
+
+// ContainerProperties - Properties of a container.
+type ContainerProperties struct {
+	LastModified time.Time `xml:"Last-Modified"`
+	Etag ETag `xml:"Etag"`
+	// LeaseStatus - Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked', 'LeaseStatusNone'
+	LeaseStatus LeaseStatusType `xml:"LeaseStatus"`
+	// LeaseState - Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken', 'LeaseStateNone'
+	LeaseState LeaseStateType `xml:"LeaseState"`
+	// LeaseDuration - Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed', 'LeaseDurationNone'
+	LeaseDuration LeaseDurationType `xml:"LeaseDuration"`
+	// PublicAccess - Possible values include: 'PublicAccessContainer', 'PublicAccessBlob', 'PublicAccessNone'
+	PublicAccess PublicAccessType `xml:"PublicAccess"`
+	HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"`
+	HasLegalHold *bool `xml:"HasLegalHold"`
+	DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"`
+	PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"`
+	DeletedTime *time.Time `xml:"DeletedTime"`
+	RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"`
+	// IsImmutableStorageWithVersioningEnabled - Indicates if version-level WORM (write once, read many) is enabled on this container.
+	IsImmutableStorageWithVersioningEnabled *bool `xml:"ImmutableStorageWithVersioningEnabled"`
+}
+
+// MarshalXML implements the xml.Marshaler interface for ContainerProperties.
+func (cp ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	// Reinterpret the struct as the layout-identical internal containerProperties
+	// type, whose wrapper date fields carry the custom XML time handling.
+	cp2 := (*containerProperties)(unsafe.Pointer(&cp))
+	return e.EncodeElement(*cp2, start)
+}
+
+// UnmarshalXML implements the xml.Unmarshaler interface for ContainerProperties.
+func (cp *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	// Decode through the layout-identical internal type; the pointer alias writes straight into cp.
+	cp2 := (*containerProperties)(unsafe.Pointer(cp))
+	return d.DecodeElement(cp2, &start)
+}
+
+// ContainerReleaseLeaseResponse ...
+type ContainerReleaseLeaseResponse struct {
+	rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (crlr ContainerReleaseLeaseResponse) Response() *http.Response { + return crlr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (crlr ContainerReleaseLeaseResponse) StatusCode() int { + return crlr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (crlr ContainerReleaseLeaseResponse) Status() string { + return crlr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (crlr ContainerReleaseLeaseResponse) ClientRequestID() string { + return crlr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (crlr ContainerReleaseLeaseResponse) Date() time.Time { + s := crlr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (crlr ContainerReleaseLeaseResponse) ErrorCode() string { + return crlr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (crlr ContainerReleaseLeaseResponse) ETag() ETag { + return ETag(crlr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (crlr ContainerReleaseLeaseResponse) LastModified() time.Time { + s := crlr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (crlr ContainerReleaseLeaseResponse) RequestID() string { + return crlr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (crlr ContainerReleaseLeaseResponse) Version() string { + return crlr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerRenameResponse ... +type ContainerRenameResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (crr ContainerRenameResponse) Response() *http.Response { + return crr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (crr ContainerRenameResponse) StatusCode() int { + return crr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (crr ContainerRenameResponse) Status() string { + return crr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (crr ContainerRenameResponse) ClientRequestID() string { + return crr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (crr ContainerRenameResponse) Date() time.Time { + s := crr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (crr ContainerRenameResponse) ErrorCode() string { + return crr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (crr ContainerRenameResponse) RequestID() string { + return crr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. 
+func (crr ContainerRenameResponse) Version() string { + return crr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerRenewLeaseResponse ... +type ContainerRenewLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (crlr ContainerRenewLeaseResponse) Response() *http.Response { + return crlr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (crlr ContainerRenewLeaseResponse) StatusCode() int { + return crlr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (crlr ContainerRenewLeaseResponse) Status() string { + return crlr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (crlr ContainerRenewLeaseResponse) ClientRequestID() string { + return crlr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (crlr ContainerRenewLeaseResponse) Date() time.Time { + s := crlr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (crlr ContainerRenewLeaseResponse) ErrorCode() string { + return crlr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (crlr ContainerRenewLeaseResponse) ETag() ETag { + return ETag(crlr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (crlr ContainerRenewLeaseResponse) LastModified() time.Time { + s := crlr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (crlr ContainerRenewLeaseResponse) LeaseID() string { + return crlr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (crlr ContainerRenewLeaseResponse) RequestID() string { + return crlr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (crlr ContainerRenewLeaseResponse) Version() string { + return crlr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerRestoreResponse ... +type ContainerRestoreResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (crr ContainerRestoreResponse) Response() *http.Response { + return crr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (crr ContainerRestoreResponse) StatusCode() int { + return crr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (crr ContainerRestoreResponse) Status() string { + return crr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (crr ContainerRestoreResponse) ClientRequestID() string { + return crr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (crr ContainerRestoreResponse) Date() time.Time { + s := crr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. 
+func (crr ContainerRestoreResponse) ErrorCode() string { + return crr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (crr ContainerRestoreResponse) RequestID() string { + return crr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (crr ContainerRestoreResponse) Version() string { + return crr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerSetAccessPolicyResponse ... +type ContainerSetAccessPolicyResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (csapr ContainerSetAccessPolicyResponse) Response() *http.Response { + return csapr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (csapr ContainerSetAccessPolicyResponse) StatusCode() int { + return csapr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (csapr ContainerSetAccessPolicyResponse) Status() string { + return csapr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (csapr ContainerSetAccessPolicyResponse) ClientRequestID() string { + return csapr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (csapr ContainerSetAccessPolicyResponse) Date() time.Time { + s := csapr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (csapr ContainerSetAccessPolicyResponse) ErrorCode() string { + return csapr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (csapr ContainerSetAccessPolicyResponse) ETag() ETag { + return ETag(csapr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (csapr ContainerSetAccessPolicyResponse) LastModified() time.Time { + s := csapr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (csapr ContainerSetAccessPolicyResponse) RequestID() string { + return csapr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (csapr ContainerSetAccessPolicyResponse) Version() string { + return csapr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerSetMetadataResponse ... +type ContainerSetMetadataResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (csmr ContainerSetMetadataResponse) Response() *http.Response { + return csmr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (csmr ContainerSetMetadataResponse) StatusCode() int { + return csmr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (csmr ContainerSetMetadataResponse) Status() string { + return csmr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (csmr ContainerSetMetadataResponse) ClientRequestID() string { + return csmr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. 
+func (csmr ContainerSetMetadataResponse) Date() time.Time {
+	s := csmr.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (csmr ContainerSetMetadataResponse) ErrorCode() string {
+	return csmr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (csmr ContainerSetMetadataResponse) ETag() ETag {
+	return ETag(csmr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (csmr ContainerSetMetadataResponse) LastModified() time.Time {
+	s := csmr.rawResponse.Header.Get("Last-Modified")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (csmr ContainerSetMetadataResponse) RequestID() string {
+	return csmr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (csmr ContainerSetMetadataResponse) Version() string {
+	return csmr.rawResponse.Header.Get("x-ms-version")
+}
+
+// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access
+// resources in another domain. Web browsers implement a security restriction known as same-origin policy that
+// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain
+// (the origin domain) to call APIs in another domain.
+type CorsRule struct {
+	// AllowedOrigins - The origin domains that are permitted to make a request against the storage service via CORS. The origin domain is the domain from which the request originates. Note that the origin must be an exact case-sensitive match with the origin that the user agent sends to the service. You can also use the wildcard character '*' to allow all origin domains to make requests via CORS.
+	AllowedOrigins string `xml:"AllowedOrigins"`
+	// AllowedMethods - The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma-separated)
+	AllowedMethods string `xml:"AllowedMethods"`
+	// AllowedHeaders - The request headers that the origin domain may specify on the CORS request.
+	AllowedHeaders string `xml:"AllowedHeaders"`
+	// ExposedHeaders - The response headers that may be sent in the response to the CORS request and exposed by the browser to the request issuer.
+	ExposedHeaders string `xml:"ExposedHeaders"`
+	// MaxAgeInSeconds - The maximum amount of time that a browser should cache the preflight OPTIONS request.
+	MaxAgeInSeconds int32 `xml:"MaxAgeInSeconds"`
+}
+
+// DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is
+// formatted as delimited text.
+type DelimitedTextConfiguration struct {
+	// ColumnSeparator - The string used to separate columns.
+	ColumnSeparator *string `xml:"ColumnSeparator"`
+	// FieldQuote - The string used to quote a specific field.
+	FieldQuote *string `xml:"FieldQuote"`
+	// RecordSeparator - The string used to separate records.
+	RecordSeparator *string `xml:"RecordSeparator"`
+	// EscapeChar - The string used as an escape character.
+	EscapeChar *string `xml:"EscapeChar"`
+	// HeadersPresent - Represents whether the data has headers.
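+	// Note the field maps to the HasHeaders XML element rather than an element named HeadersPresent.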
+ HeadersPresent *bool `xml:"HasHeaders"` +} + +// downloadResponse - Wraps the response from the blobClient.Download method. +type downloadResponse struct { + rawResponse *http.Response +} + +// NewMetadata returns user-defined key/value pairs. +func (dr downloadResponse) NewMetadata() Metadata { + md := Metadata{} + for k, v := range dr.rawResponse.Header { + if len(k) > mdPrefixLen { + if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { + md[strings.ToLower(k[mdPrefixLen:])] = v[0] + } + } + } + return md +} + +// Response returns the raw HTTP response object. +func (dr downloadResponse) Response() *http.Response { + return dr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (dr downloadResponse) StatusCode() int { + return dr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (dr downloadResponse) Status() string { + return dr.rawResponse.Status +} + +// Body returns the raw HTTP response object's Body. +func (dr downloadResponse) Body() io.ReadCloser { + return dr.rawResponse.Body +} + +// AcceptRanges returns the value for header Accept-Ranges. +func (dr downloadResponse) AcceptRanges() string { + return dr.rawResponse.Header.Get("Accept-Ranges") +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. +func (dr downloadResponse) BlobCommittedBlockCount() int32 { + s := dr.rawResponse.Header.Get("x-ms-blob-committed-block-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// BlobContentMD5 returns the value for header x-ms-blob-content-md5. +func (dr downloadResponse) BlobContentMD5() []byte { + s := dr.rawResponse.Header.Get("x-ms-blob-content-md5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (dr downloadResponse) BlobSequenceNumber() int64 { + s := dr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// BlobType returns the value for header x-ms-blob-type. +func (dr downloadResponse) BlobType() BlobType { + return BlobType(dr.rawResponse.Header.Get("x-ms-blob-type")) +} + +// CacheControl returns the value for header Cache-Control. +func (dr downloadResponse) CacheControl() string { + return dr.rawResponse.Header.Get("Cache-Control") +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (dr downloadResponse) ClientRequestID() string { + return dr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentCrc64 returns the value for header x-ms-content-crc64. +func (dr downloadResponse) ContentCrc64() []byte { + s := dr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// ContentDisposition returns the value for header Content-Disposition. +func (dr downloadResponse) ContentDisposition() string { + return dr.rawResponse.Header.Get("Content-Disposition") +} + +// ContentEncoding returns the value for header Content-Encoding. +func (dr downloadResponse) ContentEncoding() string { + return dr.rawResponse.Header.Get("Content-Encoding") +} + +// ContentLanguage returns the value for header Content-Language. 
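+// Like the other string accessors on downloadResponse it is a thin wrapper
+// over the raw header map; an illustrative read (sketch, not upstream code):
+//
+//	fmt.Printf("lang=%s type=%s\n", dr.ContentLanguage(), dr.ContentType())
+//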
+func (dr downloadResponse) ContentLanguage() string { + return dr.rawResponse.Header.Get("Content-Language") +} + +// ContentLength returns the value for header Content-Length. +func (dr downloadResponse) ContentLength() int64 { + s := dr.rawResponse.Header.Get("Content-Length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ContentMD5 returns the value for header Content-MD5. +func (dr downloadResponse) ContentMD5() []byte { + s := dr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// ContentRange returns the value for header Content-Range. +func (dr downloadResponse) ContentRange() string { + return dr.rawResponse.Header.Get("Content-Range") +} + +// ContentType returns the value for header Content-Type. +func (dr downloadResponse) ContentType() string { + return dr.rawResponse.Header.Get("Content-Type") +} + +// CopyCompletionTime returns the value for header x-ms-copy-completion-time. +func (dr downloadResponse) CopyCompletionTime() time.Time { + s := dr.rawResponse.Header.Get("x-ms-copy-completion-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// CopyID returns the value for header x-ms-copy-id. +func (dr downloadResponse) CopyID() string { + return dr.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyProgress returns the value for header x-ms-copy-progress. +func (dr downloadResponse) CopyProgress() string { + return dr.rawResponse.Header.Get("x-ms-copy-progress") +} + +// CopySource returns the value for header x-ms-copy-source. +func (dr downloadResponse) CopySource() string { + return dr.rawResponse.Header.Get("x-ms-copy-source") +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (dr downloadResponse) CopyStatus() CopyStatusType { + return CopyStatusType(dr.rawResponse.Header.Get("x-ms-copy-status")) +} + +// CopyStatusDescription returns the value for header x-ms-copy-status-description. +func (dr downloadResponse) CopyStatusDescription() string { + return dr.rawResponse.Header.Get("x-ms-copy-status-description") +} + +// Date returns the value for header Date. +func (dr downloadResponse) Date() time.Time { + s := dr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (dr downloadResponse) EncryptionKeySha256() string { + return dr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (dr downloadResponse) EncryptionScope() string { + return dr.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (dr downloadResponse) ErrorCode() string { + return dr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (dr downloadResponse) ETag() ETag { + return ETag(dr.rawResponse.Header.Get("ETag")) +} + +// ImmutabilityPolicyExpiresOn returns the value for header x-ms-immutability-policy-until-date. 
+func (dr downloadResponse) ImmutabilityPolicyExpiresOn() time.Time {
+	s := dr.rawResponse.Header.Get("x-ms-immutability-policy-until-date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// ImmutabilityPolicyMode returns the value for header x-ms-immutability-policy-mode.
+func (dr downloadResponse) ImmutabilityPolicyMode() string {
+	return dr.rawResponse.Header.Get("x-ms-immutability-policy-mode")
+}
+
+// IsCurrentVersion returns the value for header x-ms-is-current-version.
+func (dr downloadResponse) IsCurrentVersion() string {
+	return dr.rawResponse.Header.Get("x-ms-is-current-version")
+}
+
+// IsSealed returns the value for header x-ms-blob-sealed.
+func (dr downloadResponse) IsSealed() string {
+	return dr.rawResponse.Header.Get("x-ms-blob-sealed")
+}
+
+// IsServerEncrypted returns the value for header x-ms-server-encrypted.
+func (dr downloadResponse) IsServerEncrypted() string {
+	return dr.rawResponse.Header.Get("x-ms-server-encrypted")
+}
+
+// LastAccessed returns the value for header x-ms-last-access-time.
+func (dr downloadResponse) LastAccessed() time.Time {
+	s := dr.rawResponse.Header.Get("x-ms-last-access-time")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// LastModified returns the value for header Last-Modified.
+func (dr downloadResponse) LastModified() time.Time {
+	s := dr.rawResponse.Header.Get("Last-Modified")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// LeaseDuration returns the value for header x-ms-lease-duration.
+func (dr downloadResponse) LeaseDuration() LeaseDurationType {
+	return LeaseDurationType(dr.rawResponse.Header.Get("x-ms-lease-duration"))
+}
+
+// LeaseState returns the value for header x-ms-lease-state.
+func (dr downloadResponse) LeaseState() LeaseStateType {
+	return LeaseStateType(dr.rawResponse.Header.Get("x-ms-lease-state"))
+}
+
+// LeaseStatus returns the value for header x-ms-lease-status.
+func (dr downloadResponse) LeaseStatus() LeaseStatusType {
+	return LeaseStatusType(dr.rawResponse.Header.Get("x-ms-lease-status"))
+}
+
+// LegalHold returns the value for header x-ms-legal-hold.
+func (dr downloadResponse) LegalHold() string {
+	return dr.rawResponse.Header.Get("x-ms-legal-hold")
+}
+
+// ObjectReplicationPolicyID returns the value for header x-ms-or-policy-id.
+func (dr downloadResponse) ObjectReplicationPolicyID() string {
+	return dr.rawResponse.Header.Get("x-ms-or-policy-id")
+}
+
+// ObjectReplicationRules returns the value for header x-ms-or.
+func (dr downloadResponse) ObjectReplicationRules() string {
+	return dr.rawResponse.Header.Get("x-ms-or")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (dr downloadResponse) RequestID() string {
+	return dr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// TagCount returns the value for header x-ms-tag-count.
+func (dr downloadResponse) TagCount() int64 {
+	s := dr.rawResponse.Header.Get("x-ms-tag-count")
+	if s == "" {
+		return -1
+	}
+	i, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		i = 0
+	}
+	return i
+}
+
+// Version returns the value for header x-ms-version.
+func (dr downloadResponse) Version() string {
+	return dr.rawResponse.Header.Get("x-ms-version")
+}
+
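+// Example (illustrative sketch, not part of the generated source): a typical
+// consumer of the unexported downloadResponse (via the exported wrapper that
+// embeds it elsewhere in this package) checks the status, reads the typed
+// headers and metadata, then streams the body:
+//
+//	fmt.Println(dr.Status(), dr.ContentLength(), dr.ETag())
+//	for k, v := range dr.NewMetadata() {
+//		fmt.Printf("meta %s=%s\n", k, v) // x-ms-meta- prefix already stripped
+//	}
+//	defer dr.Body().Close()
+//	if _, err := io.Copy(os.Stdout, dr.Body()); err != nil {
+//		log.Fatal(err)
+//	}
+
+// VersionID returns the value for header x-ms-version-id.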
+func (dr downloadResponse) VersionID() string { + return dr.rawResponse.Header.Get("x-ms-version-id") +} + +// FilterBlobItem - Blob info from a Filter Blobs API call +type FilterBlobItem struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Blob"` + Name string `xml:"Name"` + ContainerName string `xml:"ContainerName"` + Tags *BlobTags `xml:"Tags"` +} + +// FilterBlobSegment - The result of a Filter Blobs API call +type FilterBlobSegment struct { + rawResponse *http.Response + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"EnumerationResults"` + ServiceEndpoint string `xml:"ServiceEndpoint,attr"` + Where string `xml:"Where"` + Blobs []FilterBlobItem `xml:"Blobs>Blob"` + NextMarker *string `xml:"NextMarker"` +} + +// Response returns the raw HTTP response object. +func (fbs FilterBlobSegment) Response() *http.Response { + return fbs.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (fbs FilterBlobSegment) StatusCode() int { + return fbs.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (fbs FilterBlobSegment) Status() string { + return fbs.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (fbs FilterBlobSegment) ClientRequestID() string { + return fbs.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (fbs FilterBlobSegment) Date() time.Time { + s := fbs.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (fbs FilterBlobSegment) ErrorCode() string { + return fbs.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (fbs FilterBlobSegment) RequestID() string { + return fbs.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (fbs FilterBlobSegment) Version() string { + return fbs.rawResponse.Header.Get("x-ms-version") +} + +// GeoReplication - Geo-Replication information for the Secondary Storage Service +type GeoReplication struct { + // Status - The status of the secondary location. Possible values include: 'GeoReplicationStatusLive', 'GeoReplicationStatusBootstrap', 'GeoReplicationStatusUnavailable', 'GeoReplicationStatusNone' + Status GeoReplicationStatusType `xml:"Status"` + // LastSyncTime - A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available for read operations at the secondary. Primary writes after this point in time may or may not be available for reads. + LastSyncTime time.Time `xml:"LastSyncTime"` +} + +// MarshalXML implements the xml.Marshaler interface for GeoReplication. +func (gr GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + gr2 := (*geoReplication)(unsafe.Pointer(&gr)) + return e.EncodeElement(*gr2, start) +} + +// UnmarshalXML implements the xml.Unmarshaler interface for GeoReplication. 
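+// Like MarshalXML above, it relies on the unexported geoReplication twin
+// declared elsewhere in this file: a struct with an identical memory layout
+// whose time field uses an XML-aware wrapper type, so the unsafe.Pointer
+// cast is a zero-copy reinterpretation rather than a conversion. A sketch of
+// the assumed shape of that twin:
+//
+//	type geoReplication struct {
+//		Status       GeoReplicationStatusType `xml:"Status"`
+//		LastSyncTime timeRFC1123              `xml:"LastSyncTime"`
+//	}
+//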
+func (gr *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + gr2 := (*geoReplication)(unsafe.Pointer(gr)) + return d.DecodeElement(gr2, &start) +} + +// JSONTextConfiguration - json text configuration +type JSONTextConfiguration struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"JsonTextConfiguration"` + // RecordSeparator - The string used to separate records. + RecordSeparator *string `xml:"RecordSeparator"` +} + +// KeyInfo - Key information +type KeyInfo struct { + // Start - The date-time the key is active in ISO 8601 UTC time + Start string `xml:"Start"` + // Expiry - The date-time the key expires in ISO 8601 UTC time + Expiry string `xml:"Expiry"` +} + +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse struct { + rawResponse *http.Response + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"EnumerationResults"` + ServiceEndpoint string `xml:"ServiceEndpoint,attr"` + ContainerName string `xml:"ContainerName,attr"` + Prefix *string `xml:"Prefix"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + Segment BlobFlatListSegment `xml:"Blobs"` + NextMarker Marker `xml:"NextMarker"` +} + +// Response returns the raw HTTP response object. +func (lbfsr ListBlobsFlatSegmentResponse) Response() *http.Response { + return lbfsr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (lbfsr ListBlobsFlatSegmentResponse) StatusCode() int { + return lbfsr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (lbfsr ListBlobsFlatSegmentResponse) Status() string { + return lbfsr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (lbfsr ListBlobsFlatSegmentResponse) ClientRequestID() string { + return lbfsr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentType returns the value for header Content-Type. +func (lbfsr ListBlobsFlatSegmentResponse) ContentType() string { + return lbfsr.rawResponse.Header.Get("Content-Type") +} + +// Date returns the value for header Date. +func (lbfsr ListBlobsFlatSegmentResponse) Date() time.Time { + s := lbfsr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (lbfsr ListBlobsFlatSegmentResponse) ErrorCode() string { + return lbfsr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (lbfsr ListBlobsFlatSegmentResponse) RequestID() string { + return lbfsr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (lbfsr ListBlobsFlatSegmentResponse) Version() string { + return lbfsr.rawResponse.Header.Get("x-ms-version") +} + +// ListBlobsHierarchySegmentResponse - An enumeration of blobs +type ListBlobsHierarchySegmentResponse struct { + rawResponse *http.Response + // XMLName is used for marshalling and is subject to removal in a future release. 
+ XMLName xml.Name `xml:"EnumerationResults"` + ServiceEndpoint string `xml:"ServiceEndpoint,attr"` + ContainerName string `xml:"ContainerName,attr"` + Prefix *string `xml:"Prefix"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + Delimiter *string `xml:"Delimiter"` + Segment BlobHierarchyListSegment `xml:"Blobs"` + NextMarker Marker `xml:"NextMarker"` +} + +// Response returns the raw HTTP response object. +func (lbhsr ListBlobsHierarchySegmentResponse) Response() *http.Response { + return lbhsr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (lbhsr ListBlobsHierarchySegmentResponse) StatusCode() int { + return lbhsr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (lbhsr ListBlobsHierarchySegmentResponse) Status() string { + return lbhsr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (lbhsr ListBlobsHierarchySegmentResponse) ClientRequestID() string { + return lbhsr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentType returns the value for header Content-Type. +func (lbhsr ListBlobsHierarchySegmentResponse) ContentType() string { + return lbhsr.rawResponse.Header.Get("Content-Type") +} + +// Date returns the value for header Date. +func (lbhsr ListBlobsHierarchySegmentResponse) Date() time.Time { + s := lbhsr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (lbhsr ListBlobsHierarchySegmentResponse) ErrorCode() string { + return lbhsr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (lbhsr ListBlobsHierarchySegmentResponse) RequestID() string { + return lbhsr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (lbhsr ListBlobsHierarchySegmentResponse) Version() string { + return lbhsr.rawResponse.Header.Get("x-ms-version") +} + +// ListContainersSegmentResponse - An enumeration of containers +type ListContainersSegmentResponse struct { + rawResponse *http.Response + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"EnumerationResults"` + ServiceEndpoint string `xml:"ServiceEndpoint,attr"` + Prefix *string `xml:"Prefix"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + ContainerItems []ContainerItem `xml:"Containers>Container"` + NextMarker Marker `xml:"NextMarker"` +} + +// Response returns the raw HTTP response object. +func (lcsr ListContainersSegmentResponse) Response() *http.Response { + return lcsr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (lcsr ListContainersSegmentResponse) StatusCode() int { + return lcsr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (lcsr ListContainersSegmentResponse) Status() string { + return lcsr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (lcsr ListContainersSegmentResponse) ClientRequestID() string { + return lcsr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ErrorCode returns the value for header x-ms-error-code. 
+func (lcsr ListContainersSegmentResponse) ErrorCode() string { + return lcsr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (lcsr ListContainersSegmentResponse) RequestID() string { + return lcsr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (lcsr ListContainersSegmentResponse) Version() string { + return lcsr.rawResponse.Header.Get("x-ms-version") +} + +// Logging - Azure Analytics Logging settings. +type Logging struct { + // Version - The version of Storage Analytics to configure. + Version string `xml:"Version"` + // Delete - Indicates whether all delete requests should be logged. + Delete bool `xml:"Delete"` + // Read - Indicates whether all read requests should be logged. + Read bool `xml:"Read"` + // Write - Indicates whether all write requests should be logged. + Write bool `xml:"Write"` + RetentionPolicy RetentionPolicy `xml:"RetentionPolicy"` +} + +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs +type Metrics struct { + // Version - The version of Storage Analytics to configure. + Version *string `xml:"Version"` + // Enabled - Indicates whether metrics are enabled for the Blob service. + Enabled bool `xml:"Enabled"` + // IncludeAPIs - Indicates whether metrics should generate summary statistics for called API operations. + IncludeAPIs *bool `xml:"IncludeAPIs"` + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` +} + +// PageBlobClearPagesResponse ... +type PageBlobClearPagesResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbcpr PageBlobClearPagesResponse) Response() *http.Response { + return pbcpr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbcpr PageBlobClearPagesResponse) StatusCode() int { + return pbcpr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbcpr PageBlobClearPagesResponse) Status() string { + return pbcpr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbcpr PageBlobClearPagesResponse) BlobSequenceNumber() int64 { + s := pbcpr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pbcpr PageBlobClearPagesResponse) ClientRequestID() string { + return pbcpr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. +func (pbcpr PageBlobClearPagesResponse) ContentMD5() []byte { + s := pbcpr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (pbcpr PageBlobClearPagesResponse) Date() time.Time { + s := pbcpr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbcpr PageBlobClearPagesResponse) ErrorCode() string { + return pbcpr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. 
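+// A returned ETag is typically fed back as a conditional header on the next
+// mutation for optimistic concurrency. Illustrative sketch (the access
+// condition types are defined elsewhere in this package):
+//
+//	ac := BlobAccessConditions{
+//		ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: pbcpr.ETag()},
+//	}
+//	// pass ac to the next write so it fails if the blob changed in the interim
+//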
+func (pbcpr PageBlobClearPagesResponse) ETag() ETag { + return ETag(pbcpr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pbcpr PageBlobClearPagesResponse) LastModified() time.Time { + s := pbcpr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbcpr PageBlobClearPagesResponse) RequestID() string { + return pbcpr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbcpr PageBlobClearPagesResponse) Version() string { + return pbcpr.rawResponse.Header.Get("x-ms-version") +} + +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (pbcpr PageBlobClearPagesResponse) XMsContentCrc64() []byte { + s := pbcpr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// PageBlobCopyIncrementalResponse ... +type PageBlobCopyIncrementalResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbcir PageBlobCopyIncrementalResponse) Response() *http.Response { + return pbcir.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbcir PageBlobCopyIncrementalResponse) StatusCode() int { + return pbcir.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbcir PageBlobCopyIncrementalResponse) Status() string { + return pbcir.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pbcir PageBlobCopyIncrementalResponse) ClientRequestID() string { + return pbcir.rawResponse.Header.Get("x-ms-client-request-id") +} + +// CopyID returns the value for header x-ms-copy-id. +func (pbcir PageBlobCopyIncrementalResponse) CopyID() string { + return pbcir.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (pbcir PageBlobCopyIncrementalResponse) CopyStatus() CopyStatusType { + return CopyStatusType(pbcir.rawResponse.Header.Get("x-ms-copy-status")) +} + +// Date returns the value for header Date. +func (pbcir PageBlobCopyIncrementalResponse) Date() time.Time { + s := pbcir.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbcir PageBlobCopyIncrementalResponse) ErrorCode() string { + return pbcir.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbcir PageBlobCopyIncrementalResponse) ETag() ETag { + return ETag(pbcir.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pbcir PageBlobCopyIncrementalResponse) LastModified() time.Time { + s := pbcir.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. 
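+// The request ID is the value to quote when escalating an issue with the
+// service; a minimal troubleshooting line might look like (sketch):
+//
+//	log.Printf("copy failed: code=%s request-id=%s date=%s",
+//		pbcir.ErrorCode(), pbcir.RequestID(), pbcir.Date())
+//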
+func (pbcir PageBlobCopyIncrementalResponse) RequestID() string { + return pbcir.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbcir PageBlobCopyIncrementalResponse) Version() string { + return pbcir.rawResponse.Header.Get("x-ms-version") +} + +// PageBlobCreateResponse ... +type PageBlobCreateResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbcr PageBlobCreateResponse) Response() *http.Response { + return pbcr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbcr PageBlobCreateResponse) StatusCode() int { + return pbcr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbcr PageBlobCreateResponse) Status() string { + return pbcr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pbcr PageBlobCreateResponse) ClientRequestID() string { + return pbcr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. +func (pbcr PageBlobCreateResponse) ContentMD5() []byte { + s := pbcr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (pbcr PageBlobCreateResponse) Date() time.Time { + s := pbcr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (pbcr PageBlobCreateResponse) EncryptionKeySha256() string { + return pbcr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (pbcr PageBlobCreateResponse) EncryptionScope() string { + return pbcr.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbcr PageBlobCreateResponse) ErrorCode() string { + return pbcr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbcr PageBlobCreateResponse) ETag() ETag { + return ETag(pbcr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (pbcr PageBlobCreateResponse) IsServerEncrypted() string { + return pbcr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (pbcr PageBlobCreateResponse) LastModified() time.Time { + s := pbcr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbcr PageBlobCreateResponse) RequestID() string { + return pbcr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbcr PageBlobCreateResponse) Version() string { + return pbcr.rawResponse.Header.Get("x-ms-version") +} + +// VersionID returns the value for header x-ms-version-id. +func (pbcr PageBlobCreateResponse) VersionID() string { + return pbcr.rawResponse.Header.Get("x-ms-version-id") +} + +// PageBlobResizeResponse ... 
+type PageBlobResizeResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbrr PageBlobResizeResponse) Response() *http.Response { + return pbrr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbrr PageBlobResizeResponse) StatusCode() int { + return pbrr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbrr PageBlobResizeResponse) Status() string { + return pbrr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbrr PageBlobResizeResponse) BlobSequenceNumber() int64 { + s := pbrr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pbrr PageBlobResizeResponse) ClientRequestID() string { + return pbrr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (pbrr PageBlobResizeResponse) Date() time.Time { + s := pbrr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbrr PageBlobResizeResponse) ErrorCode() string { + return pbrr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbrr PageBlobResizeResponse) ETag() ETag { + return ETag(pbrr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pbrr PageBlobResizeResponse) LastModified() time.Time { + s := pbrr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbrr PageBlobResizeResponse) RequestID() string { + return pbrr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbrr PageBlobResizeResponse) Version() string { + return pbrr.rawResponse.Header.Get("x-ms-version") +} + +// PageBlobUpdateSequenceNumberResponse ... +type PageBlobUpdateSequenceNumberResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbusnr PageBlobUpdateSequenceNumberResponse) Response() *http.Response { + return pbusnr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbusnr PageBlobUpdateSequenceNumberResponse) StatusCode() int { + return pbusnr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbusnr PageBlobUpdateSequenceNumberResponse) Status() string { + return pbusnr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbusnr PageBlobUpdateSequenceNumberResponse) BlobSequenceNumber() int64 { + s := pbusnr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ClientRequestID returns the value for header x-ms-client-request-id. 
+func (pbusnr PageBlobUpdateSequenceNumberResponse) ClientRequestID() string { + return pbusnr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (pbusnr PageBlobUpdateSequenceNumberResponse) Date() time.Time { + s := pbusnr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbusnr PageBlobUpdateSequenceNumberResponse) ErrorCode() string { + return pbusnr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbusnr PageBlobUpdateSequenceNumberResponse) ETag() ETag { + return ETag(pbusnr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pbusnr PageBlobUpdateSequenceNumberResponse) LastModified() time.Time { + s := pbusnr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbusnr PageBlobUpdateSequenceNumberResponse) RequestID() string { + return pbusnr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbusnr PageBlobUpdateSequenceNumberResponse) Version() string { + return pbusnr.rawResponse.Header.Get("x-ms-version") +} + +// PageBlobUploadPagesFromURLResponse ... +type PageBlobUploadPagesFromURLResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbupfur PageBlobUploadPagesFromURLResponse) Response() *http.Response { + return pbupfur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbupfur PageBlobUploadPagesFromURLResponse) StatusCode() int { + return pbupfur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbupfur PageBlobUploadPagesFromURLResponse) Status() string { + return pbupfur.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbupfur PageBlobUploadPagesFromURLResponse) BlobSequenceNumber() int64 { + s := pbupfur.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ContentMD5 returns the value for header Content-MD5. +func (pbupfur PageBlobUploadPagesFromURLResponse) ContentMD5() []byte { + s := pbupfur.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (pbupfur PageBlobUploadPagesFromURLResponse) Date() time.Time { + s := pbupfur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (pbupfur PageBlobUploadPagesFromURLResponse) EncryptionKeySha256() string { + return pbupfur.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. 
+func (pbupfur PageBlobUploadPagesFromURLResponse) EncryptionScope() string { + return pbupfur.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbupfur PageBlobUploadPagesFromURLResponse) ErrorCode() string { + return pbupfur.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbupfur PageBlobUploadPagesFromURLResponse) ETag() ETag { + return ETag(pbupfur.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (pbupfur PageBlobUploadPagesFromURLResponse) IsServerEncrypted() string { + return pbupfur.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (pbupfur PageBlobUploadPagesFromURLResponse) LastModified() time.Time { + s := pbupfur.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbupfur PageBlobUploadPagesFromURLResponse) RequestID() string { + return pbupfur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbupfur PageBlobUploadPagesFromURLResponse) Version() string { + return pbupfur.rawResponse.Header.Get("x-ms-version") +} + +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (pbupfur PageBlobUploadPagesFromURLResponse) XMsContentCrc64() []byte { + s := pbupfur.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// PageBlobUploadPagesResponse ... +type PageBlobUploadPagesResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbupr PageBlobUploadPagesResponse) Response() *http.Response { + return pbupr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbupr PageBlobUploadPagesResponse) StatusCode() int { + return pbupr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbupr PageBlobUploadPagesResponse) Status() string { + return pbupr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbupr PageBlobUploadPagesResponse) BlobSequenceNumber() int64 { + s := pbupr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pbupr PageBlobUploadPagesResponse) ClientRequestID() string { + return pbupr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. +func (pbupr PageBlobUploadPagesResponse) ContentMD5() []byte { + s := pbupr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. 
+func (pbupr PageBlobUploadPagesResponse) Date() time.Time { + s := pbupr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (pbupr PageBlobUploadPagesResponse) EncryptionKeySha256() string { + return pbupr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (pbupr PageBlobUploadPagesResponse) EncryptionScope() string { + return pbupr.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbupr PageBlobUploadPagesResponse) ErrorCode() string { + return pbupr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbupr PageBlobUploadPagesResponse) ETag() ETag { + return ETag(pbupr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (pbupr PageBlobUploadPagesResponse) IsServerEncrypted() string { + return pbupr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (pbupr PageBlobUploadPagesResponse) LastModified() time.Time { + s := pbupr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbupr PageBlobUploadPagesResponse) RequestID() string { + return pbupr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbupr PageBlobUploadPagesResponse) Version() string { + return pbupr.rawResponse.Header.Get("x-ms-version") +} + +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (pbupr PageBlobUploadPagesResponse) XMsContentCrc64() []byte { + s := pbupr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// PageList - the list of pages +type PageList struct { + rawResponse *http.Response + PageRange []PageRange `xml:"PageRange"` + ClearRange []ClearRange `xml:"ClearRange"` + NextMarker Marker `xml:"NextMarker"` +} + +// Response returns the raw HTTP response object. +func (pl PageList) Response() *http.Response { + return pl.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pl PageList) StatusCode() int { + return pl.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pl PageList) Status() string { + return pl.rawResponse.Status +} + +// BlobContentLength returns the value for header x-ms-blob-content-length. +func (pl PageList) BlobContentLength() int64 { + s := pl.rawResponse.Header.Get("x-ms-blob-content-length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pl PageList) ClientRequestID() string { + return pl.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. 
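+// Beyond the response headers, PageList carries the parsed XML body; an
+// illustrative walk over the valid and cleared ranges (bounds are inclusive):
+//
+//	for _, pr := range pl.PageRange {
+//		fmt.Printf("valid [%d,%d] = %d bytes\n", pr.Start, pr.End, pr.End-pr.Start+1)
+//	}
+//	for _, cr := range pl.ClearRange {
+//		fmt.Printf("clear [%d,%d]\n", cr.Start, cr.End)
+//	}
+//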
+func (pl PageList) Date() time.Time { + s := pl.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pl PageList) ErrorCode() string { + return pl.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pl PageList) ETag() ETag { + return ETag(pl.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pl PageList) LastModified() time.Time { + s := pl.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pl PageList) RequestID() string { + return pl.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pl PageList) Version() string { + return pl.rawResponse.Header.Get("x-ms-version") +} + +// PageRange ... +type PageRange struct { + Start int64 `xml:"Start"` + End int64 `xml:"End"` +} + +// QueryFormat ... +type QueryFormat struct { + // Type - Possible values include: 'QueryFormatDelimited', 'QueryFormatJSON', 'QueryFormatArrow', 'QueryFormatParquet', 'QueryFormatNone' + Type QueryFormatType `xml:"Type"` + DelimitedTextConfiguration *DelimitedTextConfiguration `xml:"DelimitedTextConfiguration"` + JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"` + ArrowConfiguration *ArrowConfiguration `xml:"ArrowConfiguration"` + ParquetTextConfiguration map[string]interface{} `xml:"ParquetTextConfiguration"` +} + +// QueryRequest - Groups the set of query request settings. +type QueryRequest struct { + // QueryType - Required. The type of the provided query expression. + QueryType string `xml:"QueryType"` + // Expression - The query expression in SQL. The maximum size of the query expression is 256KiB. + Expression string `xml:"Expression"` + InputSerialization *QuerySerialization `xml:"InputSerialization"` + OutputSerialization *QuerySerialization `xml:"OutputSerialization"` +} + +// QueryResponse - Wraps the response from the blobClient.Query method. +type QueryResponse struct { + rawResponse *http.Response +} + +// NewMetadata returns user-defined key/value pairs. +func (qr QueryResponse) NewMetadata() Metadata { + md := Metadata{} + for k, v := range qr.rawResponse.Header { + if len(k) > mdPrefixLen { + if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { + md[strings.ToLower(k[mdPrefixLen:])] = v[0] + } + } + } + return md +} + +// Response returns the raw HTTP response object. +func (qr QueryResponse) Response() *http.Response { + return qr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (qr QueryResponse) StatusCode() int { + return qr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (qr QueryResponse) Status() string { + return qr.rawResponse.Status +} + +// Body returns the raw HTTP response object's Body. +func (qr QueryResponse) Body() io.ReadCloser { + return qr.rawResponse.Body +} + +// AcceptRanges returns the value for header Accept-Ranges. +func (qr QueryResponse) AcceptRanges() string { + return qr.rawResponse.Header.Get("Accept-Ranges") +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. 
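+// As with the other numeric accessors in this file, a missing header yields
+// the -1 sentinel while an unparsable value collapses to 0 (sketch):
+//
+//	if n := qr.BlobCommittedBlockCount(); n >= 0 {
+//		fmt.Println("committed blocks:", n)
+//	}
+//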
+func (qr QueryResponse) BlobCommittedBlockCount() int32 { + s := qr.rawResponse.Header.Get("x-ms-blob-committed-block-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// BlobContentMD5 returns the value for header x-ms-blob-content-md5. +func (qr QueryResponse) BlobContentMD5() []byte { + s := qr.rawResponse.Header.Get("x-ms-blob-content-md5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (qr QueryResponse) BlobSequenceNumber() int64 { + s := qr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// BlobType returns the value for header x-ms-blob-type. +func (qr QueryResponse) BlobType() BlobType { + return BlobType(qr.rawResponse.Header.Get("x-ms-blob-type")) +} + +// CacheControl returns the value for header Cache-Control. +func (qr QueryResponse) CacheControl() string { + return qr.rawResponse.Header.Get("Cache-Control") +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (qr QueryResponse) ClientRequestID() string { + return qr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentCrc64 returns the value for header x-ms-content-crc64. +func (qr QueryResponse) ContentCrc64() []byte { + s := qr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// ContentDisposition returns the value for header Content-Disposition. +func (qr QueryResponse) ContentDisposition() string { + return qr.rawResponse.Header.Get("Content-Disposition") +} + +// ContentEncoding returns the value for header Content-Encoding. +func (qr QueryResponse) ContentEncoding() string { + return qr.rawResponse.Header.Get("Content-Encoding") +} + +// ContentLanguage returns the value for header Content-Language. +func (qr QueryResponse) ContentLanguage() string { + return qr.rawResponse.Header.Get("Content-Language") +} + +// ContentLength returns the value for header Content-Length. +func (qr QueryResponse) ContentLength() int64 { + s := qr.rawResponse.Header.Get("Content-Length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ContentMD5 returns the value for header Content-MD5. +func (qr QueryResponse) ContentMD5() []byte { + s := qr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// ContentRange returns the value for header Content-Range. +func (qr QueryResponse) ContentRange() string { + return qr.rawResponse.Header.Get("Content-Range") +} + +// ContentType returns the value for header Content-Type. +func (qr QueryResponse) ContentType() string { + return qr.rawResponse.Header.Get("Content-Type") +} + +// CopyCompletionTime returns the value for header x-ms-copy-completion-time. +func (qr QueryResponse) CopyCompletionTime() time.Time { + s := qr.rawResponse.Header.Get("x-ms-copy-completion-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// CopyID returns the value for header x-ms-copy-id. 
+func (qr QueryResponse) CopyID() string { + return qr.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyProgress returns the value for header x-ms-copy-progress. +func (qr QueryResponse) CopyProgress() string { + return qr.rawResponse.Header.Get("x-ms-copy-progress") +} + +// CopySource returns the value for header x-ms-copy-source. +func (qr QueryResponse) CopySource() string { + return qr.rawResponse.Header.Get("x-ms-copy-source") +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (qr QueryResponse) CopyStatus() CopyStatusType { + return CopyStatusType(qr.rawResponse.Header.Get("x-ms-copy-status")) +} + +// CopyStatusDescription returns the value for header x-ms-copy-status-description. +func (qr QueryResponse) CopyStatusDescription() string { + return qr.rawResponse.Header.Get("x-ms-copy-status-description") +} + +// Date returns the value for header Date. +func (qr QueryResponse) Date() time.Time { + s := qr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (qr QueryResponse) EncryptionKeySha256() string { + return qr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (qr QueryResponse) EncryptionScope() string { + return qr.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (qr QueryResponse) ErrorCode() string { + return qr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (qr QueryResponse) ETag() ETag { + return ETag(qr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-server-encrypted. +func (qr QueryResponse) IsServerEncrypted() string { + return qr.rawResponse.Header.Get("x-ms-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (qr QueryResponse) LastModified() time.Time { + s := qr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseDuration returns the value for header x-ms-lease-duration. +func (qr QueryResponse) LeaseDuration() LeaseDurationType { + return LeaseDurationType(qr.rawResponse.Header.Get("x-ms-lease-duration")) +} + +// LeaseState returns the value for header x-ms-lease-state. +func (qr QueryResponse) LeaseState() LeaseStateType { + return LeaseStateType(qr.rawResponse.Header.Get("x-ms-lease-state")) +} + +// LeaseStatus returns the value for header x-ms-lease-status. +func (qr QueryResponse) LeaseStatus() LeaseStatusType { + return LeaseStatusType(qr.rawResponse.Header.Get("x-ms-lease-status")) +} + +// RequestID returns the value for header x-ms-request-id. +func (qr QueryResponse) RequestID() string { + return qr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (qr QueryResponse) Version() string { + return qr.rawResponse.Header.Get("x-ms-version") +} + +// QuerySerialization ... 
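+// An illustrative QueryRequest against a CSV-formatted blob, wiring the
+// serialization types together (all literal values are assumptions):
+//
+//	comma := ","
+//	req := QueryRequest{
+//		QueryType:  "SQL",
+//		Expression: "SELECT * FROM BlobStorage",
+//		InputSerialization: &QuerySerialization{Format: QueryFormat{
+//			Type:                       QueryFormatDelimited,
+//			DelimitedTextConfiguration: &DelimitedTextConfiguration{ColumnSeparator: &comma},
+//		}},
+//	}
+//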
+type QuerySerialization struct { + Format QueryFormat `xml:"Format"` +} + +// RetentionPolicy - the retention policy which determines how long the associated data should persist +type RetentionPolicy struct { + // Enabled - Indicates whether a retention policy is enabled for the storage service + Enabled bool `xml:"Enabled"` + // Days - Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this value will be deleted + Days *int32 `xml:"Days"` + // AllowPermanentDelete - Indicates whether permanent delete is allowed on this storage account. + AllowPermanentDelete *bool `xml:"AllowPermanentDelete"` +} + +// ServiceGetAccountInfoResponse ... +type ServiceGetAccountInfoResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (sgair ServiceGetAccountInfoResponse) Response() *http.Response { + return sgair.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (sgair ServiceGetAccountInfoResponse) StatusCode() int { + return sgair.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (sgair ServiceGetAccountInfoResponse) Status() string { + return sgair.rawResponse.Status +} + +// AccountKind returns the value for header x-ms-account-kind. +func (sgair ServiceGetAccountInfoResponse) AccountKind() AccountKindType { + return AccountKindType(sgair.rawResponse.Header.Get("x-ms-account-kind")) +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (sgair ServiceGetAccountInfoResponse) ClientRequestID() string { + return sgair.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (sgair ServiceGetAccountInfoResponse) Date() time.Time { + s := sgair.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (sgair ServiceGetAccountInfoResponse) ErrorCode() string { + return sgair.rawResponse.Header.Get("x-ms-error-code") +} + +// IsHierarchicalNamespaceEnabled returns the value for header x-ms-is-hns-enabled. +func (sgair ServiceGetAccountInfoResponse) IsHierarchicalNamespaceEnabled() string { + return sgair.rawResponse.Header.Get("x-ms-is-hns-enabled") +} + +// RequestID returns the value for header x-ms-request-id. +func (sgair ServiceGetAccountInfoResponse) RequestID() string { + return sgair.rawResponse.Header.Get("x-ms-request-id") +} + +// SkuName returns the value for header x-ms-sku-name. +func (sgair ServiceGetAccountInfoResponse) SkuName() SkuNameType { + return SkuNameType(sgair.rawResponse.Header.Get("x-ms-sku-name")) +} + +// Version returns the value for header x-ms-version. +func (sgair ServiceGetAccountInfoResponse) Version() string { + return sgair.rawResponse.Header.Get("x-ms-version") +} + +// ServiceSetPropertiesResponse ... +type ServiceSetPropertiesResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (sspr ServiceSetPropertiesResponse) Response() *http.Response { + return sspr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (sspr ServiceSetPropertiesResponse) StatusCode() int { + return sspr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". 
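+// A quick account capability probe using the accessors above (illustrative;
+// sgair is a ServiceGetAccountInfoResponse):
+//
+//	fmt.Printf("kind=%s sku=%s hns=%s\n",
+//		sgair.AccountKind(), sgair.SkuName(), sgair.IsHierarchicalNamespaceEnabled())
+//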
+func (sspr ServiceSetPropertiesResponse) Status() string { + return sspr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (sspr ServiceSetPropertiesResponse) ClientRequestID() string { + return sspr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (sspr ServiceSetPropertiesResponse) ErrorCode() string { + return sspr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (sspr ServiceSetPropertiesResponse) RequestID() string { + return sspr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (sspr ServiceSetPropertiesResponse) Version() string { + return sspr.rawResponse.Header.Get("x-ms-version") +} + +// SignedIdentifier - signed identifier +type SignedIdentifier struct { + // ID - a unique id + ID string `xml:"Id"` + AccessPolicy AccessPolicy `xml:"AccessPolicy"` +} + +// SignedIdentifiers - Wraps the response from the containerClient.GetAccessPolicy method. +type SignedIdentifiers struct { + rawResponse *http.Response + Items []SignedIdentifier `xml:"SignedIdentifier"` +} + +// Response returns the raw HTTP response object. +func (si SignedIdentifiers) Response() *http.Response { + return si.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (si SignedIdentifiers) StatusCode() int { + return si.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (si SignedIdentifiers) Status() string { + return si.rawResponse.Status +} + +// BlobPublicAccess returns the value for header x-ms-blob-public-access. +func (si SignedIdentifiers) BlobPublicAccess() PublicAccessType { + return PublicAccessType(si.rawResponse.Header.Get("x-ms-blob-public-access")) +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (si SignedIdentifiers) ClientRequestID() string { + return si.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (si SignedIdentifiers) Date() time.Time { + s := si.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (si SignedIdentifiers) ErrorCode() string { + return si.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (si SignedIdentifiers) ETag() ETag { + return ETag(si.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (si SignedIdentifiers) LastModified() time.Time { + s := si.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (si SignedIdentifiers) RequestID() string { + return si.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. 
+func (si SignedIdentifiers) Version() string { + return si.rawResponse.Header.Get("x-ms-version") +} + +// StaticWebsite - The properties that enable an account to host a static website +type StaticWebsite struct { + // Enabled - Indicates whether this account is hosting a static website + Enabled bool `xml:"Enabled"` + // IndexDocument - The default name of the index page under each directory + IndexDocument *string `xml:"IndexDocument"` + // ErrorDocument404Path - The absolute path of the custom 404 page + ErrorDocument404Path *string `xml:"ErrorDocument404Path"` + // DefaultIndexDocumentPath - Absolute path of the default index page + DefaultIndexDocumentPath *string `xml:"DefaultIndexDocumentPath"` +} + +// // StorageError ... +// type StorageError struct { +// Message *string `xml:"Message"` +// } + +// StorageServiceProperties - Storage Service Properties. +type StorageServiceProperties struct { + rawResponse *http.Response + Logging *Logging `xml:"Logging"` + HourMetrics *Metrics `xml:"HourMetrics"` + MinuteMetrics *Metrics `xml:"MinuteMetrics"` + // Cors - The set of CORS rules. + Cors []CorsRule `xml:"Cors>CorsRule"` + // DefaultServiceVersion - The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible values include version 2008-10-27 and all more recent versions + DefaultServiceVersion *string `xml:"DefaultServiceVersion"` + DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"` + StaticWebsite *StaticWebsite `xml:"StaticWebsite"` +} + +// Response returns the raw HTTP response object. +func (ssp StorageServiceProperties) Response() *http.Response { + return ssp.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (ssp StorageServiceProperties) StatusCode() int { + return ssp.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (ssp StorageServiceProperties) Status() string { + return ssp.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (ssp StorageServiceProperties) ClientRequestID() string { + return ssp.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (ssp StorageServiceProperties) ErrorCode() string { + return ssp.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (ssp StorageServiceProperties) RequestID() string { + return ssp.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (ssp StorageServiceProperties) Version() string { + return ssp.rawResponse.Header.Get("x-ms-version") +} + +// StorageServiceStats - Stats for the storage service. +type StorageServiceStats struct { + rawResponse *http.Response + GeoReplication *GeoReplication `xml:"GeoReplication"` +} + +// Response returns the raw HTTP response object. +func (sss StorageServiceStats) Response() *http.Response { + return sss.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (sss StorageServiceStats) StatusCode() int { + return sss.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (sss StorageServiceStats) Status() string { + return sss.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. 
+func (sss StorageServiceStats) ClientRequestID() string { + return sss.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (sss StorageServiceStats) Date() time.Time { + s := sss.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (sss StorageServiceStats) ErrorCode() string { + return sss.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (sss StorageServiceStats) RequestID() string { + return sss.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (sss StorageServiceStats) Version() string { + return sss.rawResponse.Header.Get("x-ms-version") +} + +// SubmitBatchResponse - Wraps the response from the containerClient.SubmitBatch method. +type SubmitBatchResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (sbr SubmitBatchResponse) Response() *http.Response { + return sbr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (sbr SubmitBatchResponse) StatusCode() int { + return sbr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (sbr SubmitBatchResponse) Status() string { + return sbr.rawResponse.Status +} + +// Body returns the raw HTTP response object's Body. +func (sbr SubmitBatchResponse) Body() io.ReadCloser { + return sbr.rawResponse.Body +} + +// ContentType returns the value for header Content-Type. +func (sbr SubmitBatchResponse) ContentType() string { + return sbr.rawResponse.Header.Get("Content-Type") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (sbr SubmitBatchResponse) ErrorCode() string { + return sbr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (sbr SubmitBatchResponse) RequestID() string { + return sbr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (sbr SubmitBatchResponse) Version() string { + return sbr.rawResponse.Header.Get("x-ms-version") +} + +// UserDelegationKey - A user delegation key +type UserDelegationKey struct { + rawResponse *http.Response + // SignedOid - The Azure Active Directory object ID in GUID format. + SignedOid string `xml:"SignedOid"` + // SignedTid - The Azure Active Directory tenant ID in GUID format + SignedTid string `xml:"SignedTid"` + // SignedStart - The date-time the key is active + SignedStart time.Time `xml:"SignedStart"` + // SignedExpiry - The date-time the key expires + SignedExpiry time.Time `xml:"SignedExpiry"` + // SignedService - Abbreviation of the Azure Storage service that accepts the key + SignedService string `xml:"SignedService"` + // SignedVersion - The service version that created the key + SignedVersion string `xml:"SignedVersion"` + // Value - The key as a base64 string + Value string `xml:"Value"` +} + +// MarshalXML implements the xml.Marshaler interface for UserDelegationKey. +func (udk UserDelegationKey) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + udk2 := (*userDelegationKey)(unsafe.Pointer(&udk)) + return e.EncodeElement(*udk2, start) +} + +// UnmarshalXML implements the xml.Unmarshaler interface for UserDelegationKey. 
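+//
+// Both XML hooks reinterpret the exported struct as its unexported twin via
+// unsafe.Pointer; this is only sound because init() below checks that the two
+// layouts have identical sizes. A minimal sketch of the pattern (editor's
+// illustration, not generated code):
+//
+//	external := UserDelegationKey{}
+//	internal := (*userDelegationKey)(unsafe.Pointer(&external))
+//	_ = internal // same memory, but with marshalling-friendly field types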
+func (udk *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + udk2 := (*userDelegationKey)(unsafe.Pointer(udk)) + return d.DecodeElement(udk2, &start) +} + +// Response returns the raw HTTP response object. +func (udk UserDelegationKey) Response() *http.Response { + return udk.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (udk UserDelegationKey) StatusCode() int { + return udk.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (udk UserDelegationKey) Status() string { + return udk.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (udk UserDelegationKey) ClientRequestID() string { + return udk.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (udk UserDelegationKey) Date() time.Time { + s := udk.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (udk UserDelegationKey) ErrorCode() string { + return udk.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (udk UserDelegationKey) RequestID() string { + return udk.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (udk UserDelegationKey) Version() string { + return udk.rawResponse.Header.Get("x-ms-version") +} + +func init() { + if reflect.TypeOf((*UserDelegationKey)(nil)).Elem().Size() != reflect.TypeOf((*userDelegationKey)(nil)).Elem().Size() { + validateError(errors.New("size mismatch between UserDelegationKey and userDelegationKey")) + } + if reflect.TypeOf((*AccessPolicy)(nil)).Elem().Size() != reflect.TypeOf((*accessPolicy)(nil)).Elem().Size() { + validateError(errors.New("size mismatch between AccessPolicy and accessPolicy")) + } + if reflect.TypeOf((*BlobPropertiesInternal)(nil)).Elem().Size() != reflect.TypeOf((*blobPropertiesInternal)(nil)).Elem().Size() { + validateError(errors.New("size mismatch between BlobPropertiesInternal and blobPropertiesInternal")) + } + if reflect.TypeOf((*ContainerProperties)(nil)).Elem().Size() != reflect.TypeOf((*containerProperties)(nil)).Elem().Size() { + validateError(errors.New("size mismatch between ContainerProperties and containerProperties")) + } + if reflect.TypeOf((*GeoReplication)(nil)).Elem().Size() != reflect.TypeOf((*geoReplication)(nil)).Elem().Size() { + validateError(errors.New("size mismatch between GeoReplication and geoReplication")) + } +} + +const ( + rfc3339Format = "2006-01-02T15:04:05Z" +) + +// used to convert times from UTC to GMT before sending across the wire +var gmt = time.FixedZone("GMT", 0) + +// internal type used for marshalling time in RFC1123 format +type timeRFC1123 struct { + time.Time +} + +// MarshalText implements the encoding.TextMarshaler interface for timeRFC1123. +func (t timeRFC1123) MarshalText() ([]byte, error) { + return []byte(t.Format(time.RFC1123)), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC1123. 
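+//
+// A hypothetical round trip through the RFC 1123 representation (editor's
+// sketch; the literal is the Go reference time rendered in that layout):
+//
+//	var t timeRFC1123
+//	_ = t.UnmarshalText([]byte("Mon, 02 Jan 2006 15:04:05 GMT"))
+//	b, _ := t.MarshalText() // b == []byte("Mon, 02 Jan 2006 15:04:05 GMT")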
+func (t *timeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = time.Parse(time.RFC1123, string(data)) + return +} + +// internal type used for marshalling time in RFC3339 format +type timeRFC3339 struct { + time.Time +} + +// MarshalText implements the encoding.TextMarshaler interface for timeRFC3339. +func (t timeRFC3339) MarshalText() ([]byte, error) { + return []byte(t.Format(rfc3339Format)), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC3339. +func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { + t.Time, err = time.Parse(rfc3339Format, string(data)) + return +} + +// internal type used for marshalling base64 encoded strings +type base64Encoded struct { + b []byte +} + +// MarshalText implements the encoding.TextMarshaler interface for base64Encoded. +func (c base64Encoded) MarshalText() ([]byte, error) { + return []byte(base64.StdEncoding.EncodeToString(c.b)), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for base64Encoded. +func (c *base64Encoded) UnmarshalText(data []byte) error { + b, err := base64.StdEncoding.DecodeString(string(data)) + if err != nil { + return err + } + c.b = b + return nil +} + +// internal type used for marshalling +type userDelegationKey struct { + rawResponse *http.Response + SignedOid string `xml:"SignedOid"` + SignedTid string `xml:"SignedTid"` + SignedStart timeRFC3339 `xml:"SignedStart"` + SignedExpiry timeRFC3339 `xml:"SignedExpiry"` + SignedService string `xml:"SignedService"` + SignedVersion string `xml:"SignedVersion"` + Value string `xml:"Value"` +} + +// internal type used for marshalling +type accessPolicy struct { + Start *timeRFC3339 `xml:"Start"` + Expiry *timeRFC3339 `xml:"Expiry"` + Permission *string `xml:"Permission"` +} + +// internal type used for marshalling +type blobPropertiesInternal struct { + // XMLName is used for marshalling and is subject to removal in a future release. 
+ XMLName xml.Name `xml:"Properties"` + CreationTime *timeRFC1123 `xml:"Creation-Time"` + LastModified timeRFC1123 `xml:"Last-Modified"` + Etag ETag `xml:"Etag"` + ContentLength *int64 `xml:"Content-Length"` + ContentType *string `xml:"Content-Type"` + ContentEncoding *string `xml:"Content-Encoding"` + ContentLanguage *string `xml:"Content-Language"` + ContentMD5 base64Encoded `xml:"Content-MD5"` + ContentDisposition *string `xml:"Content-Disposition"` + CacheControl *string `xml:"Cache-Control"` + BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` + BlobType BlobType `xml:"BlobType"` + LeaseStatus LeaseStatusType `xml:"LeaseStatus"` + LeaseState LeaseStateType `xml:"LeaseState"` + LeaseDuration LeaseDurationType `xml:"LeaseDuration"` + CopyID *string `xml:"CopyId"` + CopyStatus CopyStatusType `xml:"CopyStatus"` + CopySource *string `xml:"CopySource"` + CopyProgress *string `xml:"CopyProgress"` + CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` + CopyStatusDescription *string `xml:"CopyStatusDescription"` + ServerEncrypted *bool `xml:"ServerEncrypted"` + IncrementalCopy *bool `xml:"IncrementalCopy"` + DestinationSnapshot *string `xml:"DestinationSnapshot"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + AccessTier AccessTierType `xml:"AccessTier"` + AccessTierInferred *bool `xml:"AccessTierInferred"` + ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"` + CustomerProvidedKeySha256 *string `xml:"CustomerProvidedKeySha256"` + EncryptionScope *string `xml:"EncryptionScope"` + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + TagCount *int32 `xml:"TagCount"` + ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` + IsSealed *bool `xml:"Sealed"` + RehydratePriority RehydratePriorityType `xml:"RehydratePriority"` + LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` + ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + ImmutabilityPolicyMode BlobImmutabilityPolicyModeType `xml:"ImmutabilityPolicyMode"` + LegalHold *bool `xml:"LegalHold"` + Owner *string `xml:"Owner"` + Group *string `xml:"Group"` + Permissions *string `xml:"Permissions"` + ACL *string `xml:"Acl"` +} + +// internal type used for marshalling +type containerProperties struct { + LastModified timeRFC1123 `xml:"Last-Modified"` + Etag ETag `xml:"Etag"` + LeaseStatus LeaseStatusType `xml:"LeaseStatus"` + LeaseState LeaseStateType `xml:"LeaseState"` + LeaseDuration LeaseDurationType `xml:"LeaseDuration"` + PublicAccess PublicAccessType `xml:"PublicAccess"` + HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` + HasLegalHold *bool `xml:"HasLegalHold"` + DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"` + PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + IsImmutableStorageWithVersioningEnabled *bool `xml:"ImmutableStorageWithVersioningEnabled"` +} + +// internal type used for marshalling +type geoReplication struct { + Status GeoReplicationStatusType `xml:"Status"` + LastSyncTime timeRFC1123 `xml:"LastSyncTime"` +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go new file mode 100644 index 000000000..01a81fdfb --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go @@ -0,0 +1,1065 @@ +package azblob + +// Code 
generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/base64" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" +) + +// pageBlobClient is the client for the PageBlob methods of the Azblob service. +type pageBlobClient struct { + managementClient +} + +// newPageBlobClient creates an instance of the pageBlobClient client. +func newPageBlobClient(url url.URL, p pipeline.Pipeline) pageBlobClient { + return pageBlobClient{newManagementClient(url, p)} +} + +// ClearPages the Clear Pages operation clears a set of pages from a page blob +// +// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified +// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not +// specified, encryption is performed with the root account encryption key. For more information, see Encryption at +// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be +// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the +// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key +// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption +// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default +// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. +// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number +// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob +// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate +// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only +// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. 
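+//
+// A hypothetical invocation (editor's sketch; ctx, a configured pipeline p and
+// a page blob URL u are assumed, and the 512-byte-aligned range is illustrative):
+//
+//	client := newPageBlobClient(u, p)
+//	pageRange := "bytes=0-511"
+//	_, err := client.ClearPages(ctx, 0, nil, &pageRange, nil, nil, nil,
+//		EncryptionAlgorithmNone, nil, nil, nil, nil, nil, nil, nil, nil, nil)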
+func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageBlobClearPagesResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.clearPagesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobClearPagesResponse), err +} + +// clearPagesPreparer prepares the ClearPages request. +func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "page") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if ifSequenceNumberLessThanOrEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) + } + if ifSequenceNumberLessThan != nil { + req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) + } + if ifSequenceNumberEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", 
(*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-page-write", "clear") + return req, nil +} + +// clearPagesResponder handles the response to the ClearPages request. +func (client pageBlobClient) clearPagesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobClearPagesResponse{rawResponse: resp.Response()}, err +} + +// CopyIncremental the Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. +// The snapshot is copied such that only the differential changes between the previously copied snapshot are +// transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or +// copied from as usual. This API is supported since REST version 2016-05-31. +// +// copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that +// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob +// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is +// expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. +func (client pageBlobClient) CopyIncremental(ctx context.Context, copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageBlobCopyIncrementalResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.copyIncrementalPreparer(copySource, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.copyIncrementalResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobCopyIncrementalResponse), err +} + +// copyIncrementalPreparer prepares the CopyIncremental request. 
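+//
+// As a hypothetical illustration (editor's sketch, with ctx, client and the
+// snapshot URL assumed), the exported wrapper above might be driven like this:
+//
+//	src := "https://account.blob.core.windows.net/container/blob?snapshot=..."
+//	_, err := client.CopyIncremental(ctx, src, nil, nil, nil, nil, nil, nil, nil)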
+func (client pageBlobClient) copyIncrementalPreparer(copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "incrementalcopy") + req.URL.RawQuery = params.Encode() + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-copy-source", copySource) + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// copyIncrementalResponder handles the response to the CopyIncremental request. +func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobCopyIncrementalResponse{rawResponse: resp.Response()}, err +} + +// Create the Create operation creates a new page blob. +// +// contentLength is the length of the request. blobContentLength is this header specifies the maximum size for the page +// blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. timeout is the timeout parameter is +// expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. tier is optional. Indicates the tier to be set on the page blob. +// blobContentType is optional. Sets the blob's content type. If specified, this property is stored with the blob and +// returned with a read request. blobContentEncoding is optional. Sets the blob's content encoding. If specified, this +// property is stored with the blob and returned with a read request. blobContentLanguage is optional. Set the blob's +// content language. If specified, this property is stored with the blob and returned with a read request. +// blobContentMD5 is optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for +// the individual blocks were validated when each was uploaded. blobCacheControl is optional. Sets the blob's cache +// control. If specified, this property is stored with the blob and returned with a read request. metadata is optional. +// Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the +// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value +// pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from +// the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules +// for C# identifiers. 
See Naming and Referencing Containers, Blobs, and Metadata for more information. leaseID is if
+// specified, the operation only succeeds if the resource's lease is active and matches this ID. blobContentDisposition
+// is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. Specifies the encryption key to
+// use to encrypt the data provided in the request. If not specified, encryption is performed with the root account
+// encryption key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the
+// SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided.
+// encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is
+// "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version
+// 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the
+// request. If not specified, encryption is performed with the default account encryption scope. For more information,
+// see Encryption at Rest for Azure Storage Services. ifModifiedSince is specify this header value to operate only on a
+// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
+// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
+// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
+// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching
+// value. blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can
+// use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a
+// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+// analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob operations.
+// immutabilityPolicyExpiry is specifies the date time when the blob's immutability policy is set to expire.
+// immutabilityPolicyMode is specifies the immutability policy mode to set on the blob. legalHold is specified if a
+// legal hold should be set on the blob.
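+//
+// A hypothetical call creating an empty 4 MiB page blob (editor's sketch; ctx
+// and client are assumed, and every optional argument is left unset):
+//
+//	_, err := client.Create(ctx, 0, 4*1024*1024, nil, PremiumPageBlobAccessTierNone,
+//		nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, EncryptionAlgorithmNone,
+//		nil, nil, nil, nil, nil, nil, nil, nil, nil,
+//		BlobImmutabilityPolicyModeNone, nil)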
+func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (*PageBlobCreateResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.createPreparer(contentLength, blobContentLength, timeout, tier, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobSequenceNumber, requestID, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobCreateResponse), err +} + +// createPreparer prepares the Create request. 
+func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if tier != PremiumPageBlobAccessTierNone { + req.Header.Set("x-ms-access-tier", string(tier)) + } + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) + if blobSequenceNumber != nil { + req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } + if immutabilityPolicyExpiry != nil { + req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) 
+ } + if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { + req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) + } + if legalHold != nil { + req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) + } + req.Header.Set("x-ms-blob-type", "PageBlob") + return req, nil +} + +// createResponder handles the response to the Create request. +func (client pageBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobCreateResponse{rawResponse: resp.Response()}, err +} + +// GetPageRanges the Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a +// page blob +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified +// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL +// where clause on blob tags to operate only on blobs with a matching value. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. +func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageList, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageList), err +} + +// getPageRangesPreparer prepares the GetPageRanges request. 
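+//
+// A hypothetical call listing every valid range (editor's sketch; ctx and
+// client are assumed and all optional filters are left unset):
+//
+//	pl, err := client.GetPageRanges(ctx, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
+//	// on success, pl wraps the XML page list decoded by the responder below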
+func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "pagelist") + req.URL.RawQuery = params.Encode() + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPageRangesResponder handles the response to the GetPageRanges request. +func (client pageBlobClient) getPageRangesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &PageList{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// GetPageRangesDiff the Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were +// changed between target blob and previous snapshot. +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. prevsnapshot is optional in version 2015-07-08 and newer. The prevsnapshot +// parameter is a DateTime value that specifies that the response will contain only pages that were changed between +// target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a +// snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots +// are currently supported only for blobs created on or after January 1, 2016. prevSnapshotURL is optional. This header +// is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot of the +// target blob. The response will only contain pages that were changed between the target blob and its previous +// snapshot. 
rangeParameter is return only the bytes of the blob in the specified range. leaseID is if specified, the +// operation only succeeds if the resource's lease is active and matches this ID. ifModifiedSince is specify this +// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is +// specify this header value to operate only on a blob if it has not been modified since the specified date/time. +// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag +// value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on blob tags to +// operate only on blobs with a matching value. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, prevSnapshotURL *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageList, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, prevSnapshotURL, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesDiffResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageList), err +} + +// getPageRangesDiffPreparer prepares the GetPageRangesDiff request. 
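+//
+// A hypothetical diff against an older snapshot (editor's sketch; ctx, client
+// and the snapshot timestamp are assumed placeholders):
+//
+//	prev := "2016-01-01T00:00:00.0000000Z"
+//	pl, err := client.GetPageRangesDiff(ctx, nil, nil, &prev, nil, nil, nil,
+//		nil, nil, nil, nil, nil, nil)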
+func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, prevSnapshotURL *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) {
+ req, err := pipeline.NewRequest("GET", client.url, nil)
+ if err != nil {
+ return req, pipeline.NewError(err, "failed to create request")
+ }
+ params := req.URL.Query()
+ if snapshot != nil && len(*snapshot) > 0 {
+ params.Set("snapshot", *snapshot)
+ }
+ if timeout != nil {
+ params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+ }
+ if prevsnapshot != nil && len(*prevsnapshot) > 0 {
+ params.Set("prevsnapshot", *prevsnapshot)
+ }
+ params.Set("comp", "pagelist")
+ req.URL.RawQuery = params.Encode()
+ if prevSnapshotURL != nil {
+ req.Header.Set("x-ms-previous-snapshot-url", *prevSnapshotURL)
+ }
+ if rangeParameter != nil {
+ req.Header.Set("x-ms-range", *rangeParameter)
+ }
+ if leaseID != nil {
+ req.Header.Set("x-ms-lease-id", *leaseID)
+ }
+ if ifModifiedSince != nil {
+ req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123))
+ }
+ if ifUnmodifiedSince != nil {
+ req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123))
+ }
+ if ifMatch != nil {
+ req.Header.Set("If-Match", string(*ifMatch))
+ }
+ if ifNoneMatch != nil {
+ req.Header.Set("If-None-Match", string(*ifNoneMatch))
+ }
+ if ifTags != nil {
+ req.Header.Set("x-ms-if-tags", *ifTags)
+ }
+ req.Header.Set("x-ms-version", ServiceVersion)
+ if requestID != nil {
+ req.Header.Set("x-ms-client-request-id", *requestID)
+ }
+ return req, nil
+}
+
+// getPageRangesDiffResponder handles the response to the GetPageRangesDiff request.
+func (client pageBlobClient) getPageRangesDiffResponder(resp pipeline.Response) (pipeline.Response, error) {
+ err := validateResponse(resp, http.StatusOK)
+ if resp == nil {
+ return nil, err
+ }
+ result := &PageList{rawResponse: resp.Response()}
+ if err != nil {
+ return result, err
+ }
+ defer resp.Response().Body.Close()
+ b, err := ioutil.ReadAll(resp.Response().Body)
+ if err != nil {
+ return result, err
+ }
+ if len(b) > 0 {
+ b = removeBOM(b)
+ err = xml.Unmarshal(b, result)
+ if err != nil {
+ return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body")
+ }
+ }
+ return result, nil
+}
+
+// Resize resizes the Blob
+//
+// blobContentLength is this header specifies the maximum size for the page blob, up to 1 TB. The page blob size must
+// be aligned to a 512-byte boundary. timeout is the timeout parameter is expressed in seconds. For more information,
+// see Setting
+// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's
+// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the
+// data provided in the request. If not specified, encryption is performed with the root account encryption key. For
+// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the
+// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the
+// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
+// if the x-ms-encryption-key header is provided. encryptionScope is optional.
Version 2019-07-07 and later. Specifies +// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is +// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage +// Services. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the +// specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been +// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching +// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a +// SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. +func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageBlobResizeResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.resizePreparer(blobContentLength, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.resizeResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobResizeResponse), err +} + +// resizePreparer prepares the Resize request. 
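+//
+// A hypothetical grow to 8 MiB (editor's sketch; ctx and client are assumed,
+// and the new length honours the 512-byte alignment rule stated above):
+//
+//	_, err := client.Resize(ctx, 8*1024*1024, nil, nil, nil, nil,
+//		EncryptionAlgorithmNone, nil, nil, nil, nil, nil, nil, nil)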
+func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// resizeResponder handles the response to the Resize request. +func (client pageBlobClient) resizeResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobResizeResponse{rawResponse: resp.Response()}, err +} + +// UpdateSequenceNumber update the sequence number of the blob +// +// sequenceNumberAction is required if the x-ms-blob-sequence-number header is set for the request. This property +// applies to page blobs only. This property indicates how the service should modify the blob's sequence number timeout +// is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it +// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on +// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. +// ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. blobSequenceNumber +// is set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. 
The +// value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a client-generated, opaque value +// with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string) (*PageBlobUpdateSequenceNumberResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.updateSequenceNumberPreparer(sequenceNumberAction, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobSequenceNumber, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.updateSequenceNumberResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobUpdateSequenceNumberResponse), err +} + +// updateSequenceNumberPreparer prepares the UpdateSequenceNumber request. +func (client pageBlobClient) updateSequenceNumberPreparer(sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-sequence-number-action", string(sequenceNumberAction)) + if blobSequenceNumber != nil { + req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// updateSequenceNumberResponder handles the response to the UpdateSequenceNumber request. +func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobUpdateSequenceNumberResponse{rawResponse: resp.Response()}, err +} + +// UploadPages the Upload Pages operation writes a range of pages to a page blob +// +// body is initial data body will be closed upon successful return. 
Callers should ensure closure when receiving an
+// error. contentLength is the length of the request. transactionalContentMD5 is specify the transactional md5 for the
+// body, to be validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to
+// be validated by the service. timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. rangeParameter is the range of bytes to be written as a page. The range
+// should be 512 aligned and range-end is required. leaseID is if specified, the operation only succeeds if the
+// resource's lease is active and matches this ID.
+// encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not
+// specified, encryption is performed with the root account encryption key. For more information, see Encryption at
+// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be
+// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the
+// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key
+// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption
+// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default
+// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services.
+// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
+// less than or equal to the specified value. ifSequenceNumberLessThan is specify this header value to operate only on
+// a blob if it has a sequence number less than the specified value. ifSequenceNumberEqualTo is specify this header
+// value to operate only on a blob if it has the specified sequence number. ifModifiedSince is specify this header
+// value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify
+// this header value to operate only on a blob if it has not been modified since the specified date/time. ifMatch is
+// specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to
+// operate only on blobs without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on
+// blobs with a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that
+// is recorded in the analytics logs when storage analytics logging is enabled.
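+//
+// A hypothetical sketch of writing the first 512-byte page (pb, ctx and data
+// are illustrative names, not part of this package; data must hold exactly
+// 512 bytes and bytes must be imported):
+//
+//	rangeHdr := "bytes=0-511"
+//	_, err := pb.UploadPages(ctx, bytes.NewReader(data), 512, nil, nil, nil,
+//		&rangeHdr, nil, nil, nil, EncryptionAlgorithmNone, nil, nil, nil, nil,
+//		nil, nil, nil, nil, nil, nil)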
+func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageBlobUploadPagesResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, transactionalContentCrc64, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobUploadPagesResponse), err +} + +// uploadPagesPreparer prepares the UploadPages request. +func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "page") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if transactionalContentCrc64 != nil { + req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) + } + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if 
ifSequenceNumberLessThanOrEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) + } + if ifSequenceNumberLessThan != nil { + req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) + } + if ifSequenceNumberEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-page-write", "update") + return req, nil +} + +// uploadPagesResponder handles the response to the UploadPages request. +func (client pageBlobClient) uploadPagesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobUploadPagesResponse{rawResponse: resp.Response()}, err +} + +// UploadPagesFromURL the Upload Pages operation writes a range of pages to a page blob where the contents are read +// from a URL +// +// sourceURL is specify a URL to the copy source. sourceRange is bytes of source data in the specified range. The +// length of this range should match the ContentLength header and x-ms-range/Range destination range header. +// contentLength is the length of the request. rangeParameter is the range of bytes to which the source range would be +// written. The range should be 512 aligned and range-end is required. sourceContentMD5 is specify the md5 calculated +// for the range of bytes that must be read from the copy source. sourceContentcrc64 is specify the crc64 calculated +// for the range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in +// seconds. For more information, see Setting +// Timeouts for Blob Service Operations. encryptionKey is optional. Specifies the encryption key to use to encrypt +// the data provided in the request. If not specified, encryption is performed with the root account encryption key. +// For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of +// the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is +// the algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be +// provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. +// Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified, +// encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for +// Azure Storage Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and +// matches this ID. 
ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has +// a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to +// operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this +// header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is specify this +// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is +// specify this header value to operate only on a blob if it has not been modified since the specified date/time. +// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag +// value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on blob tags to +// operate only on blobs with a matching value. sourceIfModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify an ETag +// value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on +// blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit +// that is recorded in the analytics logs when storage analytics logging is enabled. copySourceAuthorization is only +// Bearer type is supported. Credentials should be a valid OAuth access token to copy source. +func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (*PageBlobUploadPagesFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.uploadPagesFromURLPreparer(sourceURL, sourceRange, contentLength, rangeParameter, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID, copySourceAuthorization) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobUploadPagesFromURLResponse), err +} + +// 
uploadPagesFromURLPreparer prepares the UploadPagesFromURL request. +func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "page") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-copy-source", sourceURL) + req.Header.Set("x-ms-source-range", sourceRange) + if sourceContentMD5 != nil { + req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) + } + if sourceContentcrc64 != nil { + req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64)) + } + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + req.Header.Set("x-ms-range", rangeParameter) + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifSequenceNumberLessThanOrEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) + } + if ifSequenceNumberLessThan != nil { + req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) + } + if ifSequenceNumberEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + if sourceIfModifiedSince != nil { + req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfUnmodifiedSince != nil { + req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfMatch != nil { + req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) + } + if sourceIfNoneMatch != nil { + req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) + } + 
req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if copySourceAuthorization != nil { + req.Header.Set("x-ms-copy-source-authorization", *copySourceAuthorization) + } + req.Header.Set("x-ms-page-write", "update") + return req, nil +} + +// uploadPagesFromURLResponder handles the response to the UploadPagesFromURL request. +func (client pageBlobClient) uploadPagesFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobUploadPagesFromURLResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_responder_policy.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_responder_policy.go new file mode 100644 index 000000000..8a023d0a0 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_responder_policy.go @@ -0,0 +1,74 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "context" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io/ioutil" +) + +type responder func(resp pipeline.Response) (result pipeline.Response, err error) + +// ResponderPolicyFactory is a Factory capable of creating a responder pipeline. +type responderPolicyFactory struct { + responder responder +} + +// New creates a responder policy factory. +func (arpf responderPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { + return responderPolicy{next: next, responder: arpf.responder} +} + +type responderPolicy struct { + next pipeline.Policy + responder responder +} + +// Do sends the request to the service and validates/deserializes the HTTP response. +func (arp responderPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + resp, err := arp.next.Do(ctx, request) + if err != nil { + return resp, err + } + return arp.responder(resp) +} + +// validateResponse checks an HTTP response's status code against a legal set of codes. +// If the response code is not legal, then validateResponse reads all of the response's body +// (containing error information) and returns a response error. +func validateResponse(resp pipeline.Response, successStatusCodes ...int) error { + if resp == nil { + return NewResponseError(nil, nil, "nil response") + } + responseCode := resp.Response().StatusCode + for _, i := range successStatusCodes { + if i == responseCode { + return nil + } + } + // only close the body in the failure case. in the + // success case responders will close the body as required. 
+ defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return err + } + // the service code, description and details will be populated during unmarshalling + responseError := NewResponseError(nil, resp.Response(), resp.Response().Status) + if len(b) > 0 { + if err = xml.Unmarshal(b, &responseError); err != nil { + return NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return responseError +} + +// removes any BOM from the byte slice +func removeBOM(b []byte) []byte { + // UTF8 + return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go new file mode 100644 index 000000000..3dcc75bb5 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go @@ -0,0 +1,95 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "fmt" + "github.com/Azure/azure-pipeline-go/pipeline" + "net" + "net/http" +) + +// if you want to provide custom error handling set this variable to your constructor function +var responseErrorFactory func(cause error, response *http.Response, description string) error + +// ResponseError identifies a responder-generated network or response parsing error. +type ResponseError interface { + // Error exposes the Error(), Temporary() and Timeout() methods. + net.Error // Includes the Go error interface + // Response returns the HTTP response. You may examine this but you should not modify it. + Response() *http.Response +} + +// NewResponseError creates an error object that implements the error interface. +func NewResponseError(cause error, response *http.Response, description string) error { + if responseErrorFactory != nil { + return responseErrorFactory(cause, response, description) + } + return &responseError{ + ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), + response: response, + description: description, + } +} + +// responseError is the internal struct that implements the public ResponseError interface. +type responseError struct { + pipeline.ErrorNode // This is embedded so that responseError "inherits" Error, Temporary, Timeout, and Cause + response *http.Response + description string +} + +// Error implements the error interface's Error method to return a string representation of the error. +func (e *responseError) Error() string { + b := &bytes.Buffer{} + fmt.Fprintf(b, "===== RESPONSE ERROR (Code=%v) =====\n", e.response.StatusCode) + fmt.Fprintf(b, "Status=%s, Description: %s\n", e.response.Status, e.description) + s := b.String() + return e.ErrorNode.Error(s) +} + +// Response implements the ResponseError interface's method to return the HTTP response. +func (e *responseError) Response() *http.Response { + return e.response +} + +// RFC7807 PROBLEM ------------------------------------------------------------------------------------ +// RFC7807Problem ... This type can be publicly embedded in another type that wants to add additional members. +/*type RFC7807Problem struct { + // Mandatory: A (relative) URI reference identifying the problem type (it MAY refer to human-readable documentation). + typeURI string // Should default to "about:blank" + // Optional: Short, human-readable summary (maybe localized). 
+	title string
+	// Optional: HTTP status code generated by the origin server
+	status int
+	// Optional: Human-readable explanation for this problem occurrence.
+	// Should help client correct the problem. Clients should NOT parse this string.
+	detail string
+	// Optional: A (relative) URI identifying this specific problem occurrence (it may or may not be dereferenced).
+	instance string
+}
+// NewRFC7807Problem ...
+func NewRFC7807Problem(typeURI string, status int, titleFormat string, a ...interface{}) error {
+	return &RFC7807Problem{
+		typeURI: typeURI,
+		status:  status,
+		title:   fmt.Sprintf(titleFormat, a...),
+	}
+}
+// Error returns the error information as a string.
+func (e *RFC7807Problem) Error() string {
+	return e.title
+}
+// TypeURI ...
+func (e *RFC7807Problem) TypeURI() string {
+	if e.typeURI == "" {
+		e.typeURI = "about:blank"
+	}
+	return e.typeURI
+}
+// Members ...
+func (e *RFC7807Problem) Members() (status int, title, detail, instance string) {
+	return e.status, e.title, e.detail, e.instance
+}*/
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go
new file mode 100644
index 000000000..3072da0e6
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go
@@ -0,0 +1,618 @@
+package azblob
+
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"github.com/Azure/azure-pipeline-go/pipeline"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+)
+
+// serviceClient is the client for the Service methods of the Azblob service.
+type serviceClient struct {
+	managementClient
+}
+
+// newServiceClient creates an instance of the serviceClient client.
+func newServiceClient(url url.URL, p pipeline.Pipeline) serviceClient {
+	return serviceClient{newManagementClient(url, p)}
+}
+
+// FilterBlobs the Filter Blobs operation enables callers to list blobs across all containers whose tags match a given
+// search expression. Filter blobs searches across all containers within a storage account but can be scoped within
+// the expression to a single container.
+//
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB
+// character limit that is recorded in the analytics logs when storage analytics logging is enabled. where is filters
+// the results to return only blobs whose tags match the specified expression. marker is a string value
+// that identifies the portion of the list of containers to be returned with the next listing operation. The operation
+// returns the NextMarker value within the response body if the listing operation did not return all containers
+// remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter
+// in a subsequent call to request the next page of list items. The marker value is opaque to the client. maxresults is
+// specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a
+// value greater than 5000, the server will return up to 5000 items.
Note that if the listing operation crosses a +// partition boundary, then the service will return a continuation token for retrieving the remainder of the results. +// For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the +// default of 5000. +func (client serviceClient) FilterBlobs(ctx context.Context, timeout *int32, requestID *string, where *string, marker *string, maxresults *int32) (*FilterBlobSegment, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}, + {targetValue: maxresults, + constraints: []constraint{{target: "maxresults", name: null, rule: false, + chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.filterBlobsPreparer(timeout, requestID, where, marker, maxresults) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.filterBlobsResponder}, req) + if err != nil { + return nil, err + } + return resp.(*FilterBlobSegment), err +} + +// filterBlobsPreparer prepares the FilterBlobs request. +func (client serviceClient) filterBlobsPreparer(timeout *int32, requestID *string, where *string, marker *string, maxresults *int32) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + if where != nil && len(*where) > 0 { + params.Set("where", *where) + } + if marker != nil && len(*marker) > 0 { + params.Set("marker", *marker) + } + if maxresults != nil { + params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) + } + params.Set("comp", "blobs") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// filterBlobsResponder handles the response to the FilterBlobs request. +func (client serviceClient) filterBlobsResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &FilterBlobSegment{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// GetAccountInfo returns the sku name and account kind +func (client serviceClient) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) { + req, err := client.getAccountInfoPreparer() + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ServiceGetAccountInfoResponse), err +} + +// getAccountInfoPreparer prepares the GetAccountInfo request. 
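+// It issues a GET with restype=account and comp=properties; the service
+// reports the SKU name and account kind in response headers (x-ms-sku-name
+// and x-ms-account-kind), which is why the responder discards any body.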
+func (client serviceClient) getAccountInfoPreparer() (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("restype", "account") + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + return req, nil +} + +// getAccountInfoResponder handles the response to the GetAccountInfo request. +func (client serviceClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ServiceGetAccountInfoResponse{rawResponse: resp.Response()}, err +} + +// GetProperties gets the properties of a storage account's Blob service, including properties for Storage Analytics +// and CORS (Cross-Origin Resource Sharing) rules. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) GetProperties(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceProperties, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPropertiesPreparer(timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*StorageServiceProperties), err +} + +// getPropertiesPreparer prepares the GetProperties request. +func (client serviceClient) getPropertiesPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "service") + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPropertiesResponder handles the response to the GetProperties request. 
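+// On success it reads the XML body in full, strips any UTF-8 byte-order mark,
+// and unmarshals the payload into a StorageServiceProperties value.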
+func (client serviceClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &StorageServiceProperties{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// GetStatistics retrieves statistics related to replication for the Blob service. It is only available on the +// secondary location endpoint when read-access geo-redundant replication is enabled for the storage account. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) GetStatistics(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceStats, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getStatisticsPreparer(timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getStatisticsResponder}, req) + if err != nil { + return nil, err + } + return resp.(*StorageServiceStats), err +} + +// getStatisticsPreparer prepares the GetStatistics request. +func (client serviceClient) getStatisticsPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "service") + params.Set("comp", "stats") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getStatisticsResponder handles the response to the GetStatistics request. +func (client serviceClient) getStatisticsResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &StorageServiceStats{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// GetUserDelegationKey retrieves a user delegation key for the Blob service. This is only a valid operation when using +// bearer token authentication. +// +// keyInfo is key information timeout is the timeout parameter is expressed in seconds. 
For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, timeout *int32, requestID *string) (*UserDelegationKey, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getUserDelegationKeyPreparer(keyInfo, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getUserDelegationKeyResponder}, req) + if err != nil { + return nil, err + } + return resp.(*UserDelegationKey), err +} + +// getUserDelegationKeyPreparer prepares the GetUserDelegationKey request. +func (client serviceClient) getUserDelegationKeyPreparer(keyInfo KeyInfo, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("POST", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "service") + params.Set("comp", "userdelegationkey") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + b, err := xml.Marshal(keyInfo) + if err != nil { + return req, pipeline.NewError(err, "failed to marshal request body") + } + req.Header.Set("Content-Type", "application/xml") + err = req.SetBody(bytes.NewReader(b)) + if err != nil { + return req, pipeline.NewError(err, "failed to set request body") + } + return req, nil +} + +// getUserDelegationKeyResponder handles the response to the GetUserDelegationKey request. +func (client serviceClient) getUserDelegationKeyResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &UserDelegationKey{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// ListContainersSegment the List Containers Segment operation returns a list of the containers under the specified +// account +// +// prefix is filters the results to return only containers whose name begins with the specified prefix. marker is a +// string value that identifies the portion of the list of containers to be returned with the next listing operation. +// The operation returns the NextMarker value within the response body if the listing operation did not return all +// containers remaining to be listed with the current page. The NextMarker value can be used as the value for the +// marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the +// client. 
maxresults is specifies the maximum number of containers to return. If the request does not specify +// maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the +// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the +// remainder of the results. For this reason, it is possible that the service will return fewer results than specified +// by maxresults, or than the default of 5000. include is include this parameter to specify that the container's +// metadata be returned as part of the response body. timeout is the timeout parameter is expressed in seconds. For +// more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include []ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersSegmentResponse, error) { + if err := validate([]validation{ + {targetValue: maxresults, + constraints: []constraint{{target: "maxresults", name: null, rule: false, + chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.listContainersSegmentPreparer(prefix, marker, maxresults, include, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listContainersSegmentResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ListContainersSegmentResponse), err +} + +// listContainersSegmentPreparer prepares the ListContainersSegment request. +func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker *string, maxresults *int32, include []ListContainersIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if prefix != nil && len(*prefix) > 0 { + params.Set("prefix", *prefix) + } + if marker != nil && len(*marker) > 0 { + params.Set("marker", *marker) + } + if maxresults != nil { + params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) + } + if include != nil && len(include) > 0 { + params.Set("include", joinConst(include, ",")) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "list") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// listContainersSegmentResponder handles the response to the ListContainersSegment request. 
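+// As with FilterBlobs, callers typically page through results by passing the
+// returned NextMarker back in as marker until it comes back empty.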
+func (client serviceClient) listContainersSegmentResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &ListContainersSegmentResponse{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// SetProperties sets properties for a storage account's Blob service endpoint, including properties for Storage +// Analytics and CORS (Cross-Origin Resource Sharing) rules +// +// storageServiceProperties is the StorageService properties. timeout is the timeout parameter is expressed in seconds. +// For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (*ServiceSetPropertiesResponse, error) { + if err := validate([]validation{ + {targetValue: storageServiceProperties, + constraints: []constraint{{target: "storageServiceProperties.Logging", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy", name: null, rule: true, + chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, + }}, + }}, + {target: "storageServiceProperties.HourMetrics", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, + }}, + }}, + {target: "storageServiceProperties.MinuteMetrics", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, + }}, + }}, + {target: "storageServiceProperties.DeleteRetentionPolicy", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, + }}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setPropertiesPreparer(storageServiceProperties, timeout, requestID) + if err != nil { + return nil, 
err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setPropertiesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ServiceSetPropertiesResponse), err +} + +// setPropertiesPreparer prepares the SetProperties request. +func (client serviceClient) setPropertiesPreparer(storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "service") + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + b, err := xml.Marshal(storageServiceProperties) + if err != nil { + return req, pipeline.NewError(err, "failed to marshal request body") + } + req.Header.Set("Content-Type", "application/xml") + err = req.SetBody(bytes.NewReader(b)) + if err != nil { + return req, pipeline.NewError(err, "failed to set request body") + } + return req, nil +} + +// setPropertiesResponder handles the response to the SetProperties request. +func (client serviceClient) setPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ServiceSetPropertiesResponse{rawResponse: resp.Response()}, err +} + +// SubmitBatch the Batch operation allows multiple API calls to be embedded into a single HTTP request. +// +// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an +// error.contentLength is the length of the request. multipartContentType is required. The value of this header must be +// multipart/mixed with a batch boundary. Example header value: multipart/mixed; boundary=batch_ timeout is the +// timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) SubmitBatch(ctx context.Context, body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (*SubmitBatchResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.submitBatchPreparer(body, contentLength, multipartContentType, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.submitBatchResponder}, req) + if err != nil { + return nil, err + } + return resp.(*SubmitBatchResponse), err +} + +// submitBatchPreparer prepares the SubmitBatch request. 
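+// It POSTs the caller-supplied multipart body with comp=batch; the
+// multipartContentType argument must carry the batch boundary so the service
+// can split the payload into its individual sub-requests.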
+func (client serviceClient) submitBatchPreparer(body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("POST", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "batch") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + req.Header.Set("Content-Type", multipartContentType) + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// submitBatchResponder handles the response to the SubmitBatch request. +func (client serviceClient) submitBatchResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + return &SubmitBatchResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_validation.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_validation.go new file mode 100644 index 000000000..98a2614e6 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_validation.go @@ -0,0 +1,367 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "fmt" + "github.com/Azure/azure-pipeline-go/pipeline" + "reflect" + "regexp" + "strings" +) + +// Constraint stores constraint name, target field name +// Rule and chain validations. +type constraint struct { + // Target field name for validation. + target string + + // Constraint name e.g. minLength, MaxLength, Pattern, etc. + name string + + // Rule for constraint e.g. greater than 10, less than 5 etc. + rule interface{} + + // Chain validations for struct type + chain []constraint +} + +// Validation stores parameter-wise validation. +type validation struct { + targetValue interface{} + constraints []constraint +} + +// Constraint list +const ( + empty = "Empty" + null = "Null" + readOnly = "ReadOnly" + pattern = "Pattern" + maxLength = "MaxLength" + minLength = "MinLength" + maxItems = "MaxItems" + minItems = "MinItems" + multipleOf = "MultipleOf" + uniqueItems = "UniqueItems" + inclusiveMaximum = "InclusiveMaximum" + exclusiveMaximum = "ExclusiveMaximum" + exclusiveMinimum = "ExclusiveMinimum" + inclusiveMinimum = "InclusiveMinimum" +) + +// Validate method validates constraints on parameter +// passed in validation array. 
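+//
+// A sketch of the shape every generated client passes in, validating that a
+// *int32 timeout is either nil or at least zero:
+//
+//	err := validate([]validation{
+//		{targetValue: timeout,
+//			constraints: []constraint{{target: "timeout", name: null, rule: false,
+//				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}})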
+func validate(m []validation) error { + for _, item := range m { + v := reflect.ValueOf(item.targetValue) + for _, constraint := range item.constraints { + var err error + switch v.Kind() { + case reflect.Ptr: + err = validatePtr(v, constraint) + case reflect.String: + err = validateString(v, constraint) + case reflect.Struct: + err = validateStruct(v, constraint) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + err = validateInt(v, constraint) + case reflect.Float32, reflect.Float64: + err = validateFloat(v, constraint) + case reflect.Array, reflect.Slice, reflect.Map: + err = validateArrayMap(v, constraint) + default: + err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) + } + if err != nil { + return err + } + } + } + return nil +} + +func validateStruct(x reflect.Value, v constraint, name ...string) error { + //Get field name from target name which is in format a.b.c + s := strings.Split(v.target, ".") + f := x.FieldByName(s[len(s)-1]) + if isZero(f) { + return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.target)) + } + err := validate([]validation{ + { + targetValue: getInterfaceValue(f), + constraints: []constraint{v}, + }, + }) + return err +} + +func validatePtr(x reflect.Value, v constraint) error { + if v.name == readOnly { + if !x.IsNil() { + return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") + } + return nil + } + if x.IsNil() { + return checkNil(x, v) + } + if v.chain != nil { + return validate([]validation{ + { + targetValue: getInterfaceValue(x.Elem()), + constraints: v.chain, + }, + }) + } + return nil +} + +func validateInt(x reflect.Value, v constraint) error { + i := x.Int() + r, ok := v.rule.(int) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) + } + switch v.name { + case multipleOf: + if i%int64(r) != 0 { + return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) + } + case exclusiveMinimum: + if i <= int64(r) { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case exclusiveMaximum: + if i >= int64(r) { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case inclusiveMinimum: + if i < int64(r) { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case inclusiveMaximum: + if i > int64(r) { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.name)) + } + return nil +} + +func validateFloat(x reflect.Value, v constraint) error { + f := x.Float() + r, ok := v.rule.(float64) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.name, v.rule)) + } + switch v.name { + case exclusiveMinimum: + if f <= r { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case exclusiveMaximum: + if f >= r { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case inclusiveMinimum: + if f < r { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case inclusiveMaximum: + if f > r { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.name)) + } + return nil +} + +func 
validateString(x reflect.Value, v constraint) error { + s := x.String() + switch v.name { + case empty: + if len(s) == 0 { + return checkEmpty(x, v) + } + case pattern: + reg, err := regexp.Compile(v.rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + if !reg.MatchString(s) { + return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.rule)) + } + case maxLength: + if _, ok := v.rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) + } + if len(s) > v.rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be less than %v", v.rule)) + } + case minLength: + if _, ok := v.rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) + } + if len(s) < v.rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be greater than %v", v.rule)) + } + case readOnly: + if len(s) > 0 { + return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.name)) + } + if v.chain != nil { + return validate([]validation{ + { + targetValue: getInterfaceValue(x), + constraints: v.chain, + }, + }) + } + return nil +} + +func validateArrayMap(x reflect.Value, v constraint) error { + switch v.name { + case null: + if x.IsNil() { + return checkNil(x, v) + } + case empty: + if x.IsNil() || x.Len() == 0 { + return checkEmpty(x, v) + } + case maxItems: + if _, ok := v.rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule)) + } + if x.Len() > v.rule.(int) { + return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.rule, x.Len())) + } + case minItems: + if _, ok := v.rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule)) + } + if x.Len() < v.rule.(int) { + return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.rule, x.Len())) + } + case uniqueItems: + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + if !checkForUniqueInArray(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x)) + } + } else if x.Kind() == reflect.Map { + if !checkForUniqueInMap(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x)) + } + } else { + return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.name, x.Kind())) + } + case readOnly: + if x.Len() != 0 { + return createError(x, v, "readonly parameter; must send as nil or empty in request") + } + case pattern: + reg, err := regexp.Compile(v.rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + keys := x.MapKeys() + for _, k := range keys { + if !reg.MatchString(k.String()) { + return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.rule)) + } + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.name)) + } + if v.chain != nil { + return validate([]validation{ + { + targetValue: getInterfaceValue(x), + constraints: v.chain, + }, + }) + } + return nil +} + +func checkNil(x reflect.Value, v constraint) error { + if _, ok := v.rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must 
be bool value for %v constraint; got: %v", v.name, v.rule)) + } + if v.rule.(bool) { + return createError(x, v, "value can not be null; required parameter") + } + return nil +} + +func checkEmpty(x reflect.Value, v constraint) error { + if _, ok := v.rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule)) + } + if v.rule.(bool) { + return createError(x, v, "value can not be null or empty; required parameter") + } + return nil +} + +func checkForUniqueInArray(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + arrOfInterface := make([]interface{}, x.Len()) + for i := 0; i < x.Len(); i++ { + arrOfInterface[i] = x.Index(i).Interface() + } + m := make(map[interface{}]bool) + for _, val := range arrOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func checkForUniqueInMap(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + mapOfInterface := make(map[interface{}]interface{}, x.Len()) + keys := x.MapKeys() + for _, k := range keys { + mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() + } + m := make(map[interface{}]bool) + for _, val := range mapOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func getInterfaceValue(x reflect.Value) interface{} { + if x.Kind() == reflect.Invalid { + return nil + } + return x.Interface() +} + +func isZero(x interface{}) bool { + return x == reflect.Zero(reflect.TypeOf(x)).Interface() +} + +func createError(x reflect.Value, v constraint, message string) error { + return pipeline.NewError(nil, fmt.Sprintf("validation failed: parameter=%s constraint=%s value=%#v details: %s", + v.target, v.name, getInterfaceValue(x), message)) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go new file mode 100644 index 000000000..d7672a1df --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go @@ -0,0 +1,14 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/0.0.0 azblob/2020-10-02" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return "0.0.0" +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go new file mode 100644 index 000000000..45be7e0fd --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go @@ -0,0 +1,240 @@ +package azblob + +import ( + "context" + "io" + "net/http" + "time" +) + +// BlobHTTPHeaders contains read/writeable blob properties. +type BlobHTTPHeaders struct { + ContentType string + ContentMD5 []byte + ContentEncoding string + ContentLanguage string + ContentDisposition string + CacheControl string +} + +// NewHTTPHeaders returns the user-modifiable properties for this blob. 
+func (bgpr BlobGetPropertiesResponse) NewHTTPHeaders() BlobHTTPHeaders {
+	return BlobHTTPHeaders{
+		ContentType:        bgpr.ContentType(),
+		ContentEncoding:    bgpr.ContentEncoding(),
+		ContentLanguage:    bgpr.ContentLanguage(),
+		ContentDisposition: bgpr.ContentDisposition(),
+		CacheControl:       bgpr.CacheControl(),
+		ContentMD5:         bgpr.ContentMD5(),
+	}
+}
+
+// /////////////////////////////////////////////////////////////////////////////
+
+// NewHTTPHeaders returns the user-modifiable properties for this blob.
+func (dr downloadResponse) NewHTTPHeaders() BlobHTTPHeaders {
+	return BlobHTTPHeaders{
+		ContentType:        dr.ContentType(),
+		ContentEncoding:    dr.ContentEncoding(),
+		ContentLanguage:    dr.ContentLanguage(),
+		ContentDisposition: dr.ContentDisposition(),
+		CacheControl:       dr.CacheControl(),
+		ContentMD5:         dr.ContentMD5(),
+	}
+}
+
+// /////////////////////////////////////////////////////////////////////////////
+
+// DownloadResponse wraps the AutoRest-generated downloadResponse and helps to provide info for retry.
+type DownloadResponse struct {
+	r       *downloadResponse
+	ctx     context.Context
+	b       BlobURL
+	getInfo HTTPGetterInfo
+}
+
+// Body constructs a new RetryReader stream for reading data. If a connection fails
+// while reading, it will make additional requests to reestablish a connection and
+// continue reading. Specifying RetryReaderOptions with MaxRetryRequests set to 0
+// (the default) returns the original response body, and no retries will be performed.
+func (r *DownloadResponse) Body(o RetryReaderOptions) io.ReadCloser {
+	if o.MaxRetryRequests == 0 { // No additional retries
+		return r.Response().Body
+	}
+	return NewRetryReader(r.ctx, r.Response(), r.getInfo, o,
+		func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) {
+			resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count, BlobAccessConditions{
+				ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: getInfo.ETag},
+			}, false, o.ClientProvidedKeyOptions)
+			if err != nil {
+				return nil, err
+			}
+			return resp.Response(), err
+		},
+	)
+}
+
+// Response returns the raw HTTP response object.
+func (r DownloadResponse) Response() *http.Response {
+	return r.r.Response()
+}
+
+// NewHTTPHeaders returns the user-modifiable properties for this blob.
+func (r DownloadResponse) NewHTTPHeaders() BlobHTTPHeaders {
+	return r.r.NewHTTPHeaders()
+}
+
+// BlobContentMD5 returns the value for header x-ms-blob-content-md5.
+func (r DownloadResponse) BlobContentMD5() []byte {
+	return r.r.BlobContentMD5()
+}
+
+// ContentMD5 returns the value for header Content-MD5.
+func (r DownloadResponse) ContentMD5() []byte {
+	return r.r.ContentMD5()
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (r DownloadResponse) StatusCode() int {
+	return r.r.StatusCode()
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (r DownloadResponse) Status() string {
+	return r.r.Status()
+}
+
+// AcceptRanges returns the value for header Accept-Ranges.
+func (r DownloadResponse) AcceptRanges() string {
+	return r.r.AcceptRanges()
+}
+
+// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count.
+func (r DownloadResponse) BlobCommittedBlockCount() int32 {
+	return r.r.BlobCommittedBlockCount()
+}
+
+// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
+func (r DownloadResponse) BlobSequenceNumber() int64 {
+	return r.r.BlobSequenceNumber()
+}
+
+// BlobType returns the value for header x-ms-blob-type.
+func (r DownloadResponse) BlobType() BlobType { + return r.r.BlobType() +} + +// CacheControl returns the value for header Cache-Control. +func (r DownloadResponse) CacheControl() string { + return r.r.CacheControl() +} + +// ContentDisposition returns the value for header Content-Disposition. +func (r DownloadResponse) ContentDisposition() string { + return r.r.ContentDisposition() +} + +// ContentEncoding returns the value for header Content-Encoding. +func (r DownloadResponse) ContentEncoding() string { + return r.r.ContentEncoding() +} + +// ContentLanguage returns the value for header Content-Language. +func (r DownloadResponse) ContentLanguage() string { + return r.r.ContentLanguage() +} + +// ContentLength returns the value for header Content-Length. +func (r DownloadResponse) ContentLength() int64 { + return r.r.ContentLength() +} + +// ContentRange returns the value for header Content-Range. +func (r DownloadResponse) ContentRange() string { + return r.r.ContentRange() +} + +// ContentType returns the value for header Content-Type. +func (r DownloadResponse) ContentType() string { + return r.r.ContentType() +} + +// CopyCompletionTime returns the value for header x-ms-copy-completion-time. +func (r DownloadResponse) CopyCompletionTime() time.Time { + return r.r.CopyCompletionTime() +} + +// CopyID returns the value for header x-ms-copy-id. +func (r DownloadResponse) CopyID() string { + return r.r.CopyID() +} + +// CopyProgress returns the value for header x-ms-copy-progress. +func (r DownloadResponse) CopyProgress() string { + return r.r.CopyProgress() +} + +// CopySource returns the value for header x-ms-copy-source. +func (r DownloadResponse) CopySource() string { + return r.r.CopySource() +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (r DownloadResponse) CopyStatus() CopyStatusType { + return r.r.CopyStatus() +} + +// CopyStatusDescription returns the value for header x-ms-copy-status-description. +func (r DownloadResponse) CopyStatusDescription() string { + return r.r.CopyStatusDescription() +} + +// Date returns the value for header Date. +func (r DownloadResponse) Date() time.Time { + return r.r.Date() +} + +// ETag returns the value for header ETag. +func (r DownloadResponse) ETag() ETag { + return ETag(r.r.ETag()) +} + +// IsServerEncrypted returns the value for header x-ms-server-encrypted. +func (r DownloadResponse) IsServerEncrypted() string { + return r.r.IsServerEncrypted() +} + +// LastModified returns the value for header Last-Modified. +func (r DownloadResponse) LastModified() time.Time { + return r.r.LastModified() +} + +// LeaseDuration returns the value for header x-ms-lease-duration. +func (r DownloadResponse) LeaseDuration() LeaseDurationType { + return r.r.LeaseDuration() +} + +// LeaseState returns the value for header x-ms-lease-state. +func (r DownloadResponse) LeaseState() LeaseStateType { + return r.r.LeaseState() +} + +// LeaseStatus returns the value for header x-ms-lease-status. +func (r DownloadResponse) LeaseStatus() LeaseStatusType { + return r.r.LeaseStatus() +} + +// RequestID returns the value for header x-ms-request-id. +func (r DownloadResponse) RequestID() string { + return r.r.RequestID() +} + +// Version returns the value for header x-ms-version. +func (r DownloadResponse) Version() string { + return r.r.Version() +} + +// NewMetadata returns user-defined key/value pairs. 
+func (r DownloadResponse) NewMetadata() Metadata {
+	return r.r.NewMetadata()
+}
diff --git a/vendor/github.com/Azure/go-autorest/.gitignore b/vendor/github.com/Azure/go-autorest/.gitignore
new file mode 100644
index 000000000..3350aaf70
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/.gitignore
@@ -0,0 +1,32 @@
+# The standard Go .gitignore file follows. (Sourced from: github.com/github/gitignore/master/Go.gitignore)
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+.DS_Store
+.idea/
+.vscode/
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+# go-autorest specific
+vendor/
+autorest/azure/example/example
diff --git a/vendor/github.com/Azure/go-autorest/CHANGELOG.md b/vendor/github.com/Azure/go-autorest/CHANGELOG.md
new file mode 100644
index 000000000..d1f596bfc
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/CHANGELOG.md
@@ -0,0 +1,1004 @@
+# CHANGELOG
+
+## v14.2.0
+
+- Added package comment to make `github.com/Azure/go-autorest` importable.
+
+## v14.1.1
+
+### Bug Fixes
+
+- Change `x-ms-authorization-auxiliary` header value separator to comma.
+
+## v14.1.0
+
+### New Features
+
+- Added `azure.SetEnvironment()` that will update the global environments map with the specified values.
+
+## v14.0.1
+
+### Bug Fixes
+
+- Fix race condition when refreshing token.
+- Fixed some tests to work with Go 1.14.
+
+## v14.0.0
+
+### Breaking Changes
+
+- By default, the `DoRetryForStatusCodes` functions will no longer infinitely retry a request when the response returns an HTTP status code of 429 (StatusTooManyRequests). To opt in to the old behavior set `autorest.Count429AsRetry` to `false`.
+
+### New Features
+
+- Variable `autorest.Max429Delay` can be used to control the maximum delay between retries when a 429 is received with no `Retry-After` header. The default is zero which means there is no cap.
+
+## v13.4.0
+
+### New Features
+
+- Added field `SendDecorators` to the `Client` type. This can be used to specify a custom chain of SendDecorators per client.
+- Added method `Client.Send()` which includes logic for selecting the preferred chain of SendDecorators.
+
+## v13.3.3
+
+### Bug Fixes
+
+- Fixed connection leak when retrying requests.
+- Enabled exponential back-off with a 2-minute cap when retrying on 429.
+- Fixed some cases where errors were inadvertently dropped.
+
+## v13.3.2
+
+### Bug Fixes
+
+- Updated `autorest.AsStringSlice()` to convert slice elements to their string representation.
+
+## v13.3.1
+
+- Updated external dependencies.
+
+## v13.3.0
+
+### New Features
+
+- Added support for shared key and shared access signature token authorization.
+  - `autorest.NewSharedKeyAuthorizer()` and dependent types.
+  - `autorest.NewSASTokenAuthorizer()` and dependent types.
+- Added `ServicePrincipalToken.SetCustomRefresh()` so a custom refresh function can be invoked when a token has expired.
+
+### Bug Fixes
+
+- Fixed `cli.AccessTokensPath()` to respect `AZURE_CONFIG_DIR` when set.
+- Support parsing error messages in XML responses.
+
+## v13.2.0
+
+### New Features
+
+- Added the following functions to replace their versions that don't take a context.
+  - `adal.InitiateDeviceAuthWithContext()`
+  - `adal.CheckForUserCompletionWithContext()`
+  - `adal.WaitForUserCompletionWithContext()`
+
+## v13.1.0
+
+### New Features
+
+- Added support for MSI authentication on Azure App Service and Azure Functions.
+
+## v13.0.2
+
+### Bug Fixes
+
+- Always retry a request even if the sender returns a non-nil error.
+
+## v13.0.1
+
+### Bug Fixes
+
+- Fixed `autorest.WithQueryParameters()` so that it properly encodes multi-value query parameters.
+
+## v13.0.0
+
+### Breaking Changes
+
+The `tracing` package has been rewritten to provide a common interface for consumers to wire in the tracing package of their choice.
+What this means is that by default no tracing provider will be compiled into your program and setting the `AZURE_SDK_TRACING_ENABLED`
+environment variable will have no effect. To enable this previous behavior, you must now add the following import to your source file.
+```go
+  import _ "github.com/Azure/go-autorest/tracing/opencensus"
+```
+The APIs required by autorest-generated code have remained but some APIs have been removed and new ones added.
+The following APIs and variables have been removed (the majority of them were moved to the `opencensus` package).
+- tracing.Transport
+- tracing.Enable()
+- tracing.EnableWithAIForwarding()
+- tracing.Disable()
+
+The following APIs and types have been added:
+- tracing.Tracer
+- tracing.Register()
+
+To hook up a tracer simply call `tracing.Register()` passing in a type that satisfies the `tracing.Tracer` interface.
+
+## v12.4.3
+
+### Bug Fixes
+
+- `autorest.MultiTenantServicePrincipalTokenAuthorizer` will now properly add its auxiliary bearer tokens.
+
+## v12.4.2
+
+### Bug Fixes
+
+- Improvements to the fixes made in v12.4.1.
+  - Remove `override` stanza from Gopkg.toml and `replace` directive from go.mod as they don't apply when being consumed as a dependency.
+  - Switched to latest version of `ocagent` that still depends on protobuf v1.2.
+  - Add indirect dependencies to the `required` clause with matching `constraint` stanzas so that `dep` dependencies match go.sum.
+
+## v12.4.1
+
+### Bug Fixes
+
+- Updated OpenCensus and OCAgent versions to versions that don't depend on v1.3+ of protobuf as it was breaking kubernetes.
+- Pinned opencensus-proto to a version that's compatible with our versions of OpenCensus and OCAgent.
+
+## v12.4.0
+
+### New Features
+
+- Added `autorest.WithPrepareDecorators` and `autorest.GetPrepareDecorators` for adding and retrieving a custom chain of PrepareDecorators to the provided context.
+
+## v12.3.0
+
+### New Features
+
+- Support for multi-tenant authorization via the x-ms-authorization-auxiliary header has been added for the client credentials with
+  secret scenario; this basically bundles multiple OAuthConfig and ServicePrincipalToken types into corresponding
+  MultiTenant* types along with a new authorizer that adds the primary and auxiliary token headers to the request.
+  The authentication helpers have been updated to support this scenario; if the environment variable AZURE_AUXILIARY_TENANT_IDS
+  is set with a semicolon-delimited list of tenants, the multi-tenant codepath will kick in to create the appropriate authorizer.
+  See `adal.NewMultiTenantOAuthConfig`, `adal.NewMultiTenantServicePrincipalToken` and `autorest.NewMultiTenantServicePrincipalTokenAuthorizer`
+  along with their supporting types and methods.
+- Added `autorest.WithSendDecorators` and `autorest.GetSendDecorators` for adding and retrieving a custom chain of SendDecorators to the provided context.
+- Added `autorest.DoRetryForStatusCodesWithCap` and `autorest.DelayForBackoffWithCap` to enforce an upper bound on the duration between retries.
+
+## v12.2.0
+
+### New Features
+
+- Added `autorest.WithXML`, `autorest.AsMerge`, `autorest.WithBytes` preparer decorators.
+- Added `autorest.ByUnmarshallingBytes` response decorator.
+- Added `Response.IsHTTPStatus` and `Response.HasHTTPStatus` helper methods for inspecting HTTP status code in `autorest.Response` types (see the sketch after the v11.2.7 entry below).
+
+### Bug Fixes
+
+- `autorest.DelayWithRetryAfter` now supports HTTP-Dates in the `Retry-After` header and is not limited to just 429 status codes.
+
+## v12.1.0
+
+### New Features
+
+- Added `to.ByteSlicePtr()`.
+- Added blob/queue storage resource ID to `azure.ResourceIdentifier`.
+
+## v12.0.0
+
+### Breaking Changes
+
+In preparation for modules, the following deprecated content has been removed.
+
+  - async.NewFuture()
+  - async.Future.Done()
+  - async.Future.WaitForCompletion()
+  - async.DoPollForAsynchronous()
+  - The `utils` package
+  - validation.NewErrorWithValidationError()
+  - The `version` package
+
+## v11.9.0
+
+### New Features
+
+- Add `ResourceIdentifiers` field to `azure.Environment` containing resource IDs for public and sovereign clouds.
+
+## v11.8.0
+
+### New Features
+
+- Added `autorest.NewClientWithOptions()` to support endpoints that require free renegotiation.
+
+## v11.7.1
+
+### Bug Fixes
+
+- Fix missing support for http(s) proxy when using the default sender.
+
+## v11.7.0
+
+### New Features
+
+- Added methods to obtain a ServicePrincipalToken on the various credential configuration types in the `auth` package.
+
+## v11.6.1
+
+### Bug Fixes
+
+- Fix ACR DNS endpoint for government clouds.
+- Add Cosmos DB DNS endpoints.
+- Update dependencies to resolve build breaks in OpenCensus.
+
+## v11.6.0
+
+### New Features
+
+- Added type `autorest.BasicAuthorizer` to support Basic authentication.
+
+## v11.5.2
+
+### Bug Fixes
+
+- Fixed `GetTokenFromCLI` not working with zsh.
+
+## v11.5.1
+
+### Bug Fixes
+
+- In `Client.sender()` set the minimum TLS version on HTTP clients to 1.2.
+
+## v11.5.0
+
+### New Features
+
+- The `auth` package has been refactored so that the environment and file settings are now available.
+- The methods used in `auth.NewAuthorizerFromEnvironment()` are now exported so that custom authorization chains can be created.
+- Added support for certificate authorization for file-based config.
+
+## v11.4.0
+
+### New Features
+
+- Added `adal.AddToUserAgent()` so callers can append custom data to the user-agent header used for ADAL requests.
+- Exported `adal.UserAgent()` for parity with `autorest.Client`.
+
+## v11.3.2
+
+### Bug Fixes
+
+- In `Future.WaitForCompletionRef()` if the provided context has a deadline don't add the default deadline.
+
+## v11.3.1
+
+### Bug Fixes
+
+- For an LRO PUT operation the final GET URL was incorrectly set to the Location polling header in some cases.
+
+## v11.3.0
+
+### New Features
+
+- Added method `ServicePrincipalToken()` to `DeviceFlowConfig` type.
+
+## v11.2.8
+
+### Bug Fixes
+
+- Deprecate content in the `version` package. The functionality has been superseded by content in the `autorest` package.
+
+## v11.2.7
+
+### Bug Fixes
+
+- Fix environment variable name for enabling tracing from `AZURE_SDK_TRACING_ENABELD` to `AZURE_SDK_TRACING_ENABLED`.
+  Note that for backward compatibility reasons, both will work until the next major version release of the package.
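+
+For illustration, a minimal sketch of the `Response.IsHTTPStatus` and `Response.HasHTTPStatus` helpers added in v12.2.0 above; the exact signatures shown are an assumption, not taken from this changelog.
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+func main() {
+	// autorest.Response embeds *http.Response, so any response can be wrapped.
+	resp := autorest.Response{Response: &http.Response{StatusCode: http.StatusCreated}}
+	fmt.Println(resp.IsHTTPStatus(http.StatusOK))                      // false
+	fmt.Println(resp.HasHTTPStatus(http.StatusOK, http.StatusCreated)) // true
+}
+```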
+
+## v11.2.6
+
+### Bug Fixes
+
+- If zero bytes are read from a polling response body don't attempt to unmarshal them.
+
+## v11.2.5
+
+### Bug Fixes
+
+- Removed race condition in `autorest.DoRetryForStatusCodes`.
+
+## v11.2.4
+
+### Bug Fixes
+
+- Function `cli.ProfilePath` now respects environment `AZURE_CONFIG_DIR` if available.
+
+## v11.2.1
+
+NOTE: Versions of Go prior to 1.10 have been removed from CI as they no
+longer work with golint.
+
+### Bug Fixes
+
+- Method `MSIConfig.Authorizer` now supports user-assigned identities.
+- The adal package now reports its own user-agent string.
+
+## v11.2.0
+
+### New Features
+
+- Added `tracing` package that enables instrumentation of HTTP and API calls.
+  Setting the env variable `AZURE_SDK_TRACING_ENABLED` or calling `tracing.Enable`
+  will start instrumenting the code for metrics and traces.
+  Additionally, setting the env variable `OCAGENT_TRACE_EXPORTER_ENDPOINT` or
+  calling `tracing.EnableWithAIForwarding` will start the instrumentation and connect to an
+  App Insights Local Forwarder that needs to be running. Note that if the
+  AI Local Forwarder is not running, tracing will still be enabled.
+  By default, instrumentation is disabled. Once enabled, instrumentation can also
+  be programmatically disabled by calling `Disable`.
+- Added `DoneWithContext` call for checking LRO status. `Done` has been deprecated.
+
+### Bug Fixes
+
+- Don't use the initial request's context for LRO polling.
+- Don't override the `refreshLock` and the `http.Client` when unmarshalling `ServicePrincipalToken` if
+  it is already set.
+
+## v11.1.1
+
+### Bug Fixes
+
+- When creating a future always include the polling tracker even if there's a failure; this allows the underlying response to be obtained by the caller.
+
+## v11.1.0
+
+### New Features
+
+- Added `auth.NewAuthorizerFromCLI` to create an authorizer configured from the Azure 2.0 CLI.
+- Added `adal.NewOAuthConfigWithAPIVersion` to create an OAuthConfig with the specified API version.
+
+## v11.0.1
+
+### New Features
+
+- Added `x5c` header to client assertion for certificate Issuer+Subject Name authentication.
+
+## v11.0.0
+
+### Breaking Changes
+
+- To handle differences between ADFS and AAD the following fields have had their types changed from `string` to `json.Number` (see the sketch below):
+  - ExpiresIn
+  - ExpiresOn
+  - NotBefore
+
+### New Features
+
+- Added `auth.NewAuthorizerFromFileWithResource` to create an authorizer from the config file with the specified resource.
+- Setting a client's `PollingDuration` to zero will use the provided context to control an LRO's polling duration.
+
+## v10.15.5
+
+### Bug Fixes
+
+- In `DoRetryForStatusCodes`, if a request's context is cancelled return the last response.
+
+## v10.15.4
+
+### Bug Fixes
+
+- If a polling operation returns a failure status code return the associated error.
+
+## v10.15.3
+
+### Bug Fixes
+
+- Initialize the polling URL and method for an LRO tracker on each iteration, favoring the Azure-AsyncOperation header.
+
+## v10.15.2
+
+### Bug Fixes
+
+- Use fmt.Fprint when printing request/response so that any escape sequences aren't treated as format specifiers.
+
+## v10.15.1
+
+### Bug Fixes
+
+- If an LRO API returns a `Failed` provisioning state in the initial response return an error at that point so the caller doesn't have to poll.
+- For failed LROs without an OData v4 error include the response body in the error's `AdditionalInfo` field to aid in diagnosing the failure.
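+
+As a minimal, self-contained sketch of why the v11.0.0 switch to `json.Number` above tolerates both quoted (ADFS-style) and unquoted (AAD-style) numerics; the token shape here is hypothetical:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// token is a hypothetical stand-in for the adal token fields named above.
+type token struct {
+	ExpiresIn json.Number `json:"expires_in"`
+}
+
+func main() {
+	// The same field decodes whether or not the service quotes the number.
+	for _, payload := range []string{`{"expires_in":"3600"}`, `{"expires_in":3600}`} {
+		var t token
+		if err := json.Unmarshal([]byte(payload), &t); err != nil {
+			panic(err)
+		}
+		n, _ := t.ExpiresIn.Int64()
+		fmt.Println(n) // 3600 in both cases
+	}
+}
+```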
+
+## v10.15.0
+
+### New Features
+
+- Add initial support for request/response logging via setting environment variables.
+  Setting `AZURE_GO_SDK_LOG_LEVEL` to `LogInfo` will log request/response
+  without their bodies. To include the bodies set the log level to `LogDebug`.
+  By default the logger writes to stderr, however it can also write to stdout or a file
+  if specified in `AZURE_GO_SDK_LOG_FILE`. Note that if the specified file
+  already exists it will be truncated.
+  IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key
+  headers. Any other secrets will _not_ be redacted.
+
+## v10.14.0
+
+### New Features
+
+- Added package version that contains version constants and user-agent data.
+
+### Bug Fixes
+
+- Add the user-agent to token requests.
+
+## v10.13.0
+
+- Added support for additionalInfo in ServiceError type.
+
+## v10.12.0
+
+### New Features
+
+- Added field ServicePrincipalToken.MaxMSIRefreshAttempts to configure the maximum number of attempts to refresh an MSI token.
+
+## v10.11.4
+
+### Bug Fixes
+
+- If an LRO returns http.StatusOK on the initial response with no async headers return the response body from Future.GetResult().
+- If there is no "final GET URL" return an error from Future.GetResult().
+
+## v10.11.3
+
+### Bug Fixes
+
+- In IMDS retry logic, if we don't receive a response don't retry.
+  - Renamed the retry function so it's clear it's meant for IMDS only.
+- For error response bodies that aren't OData-v4 compliant stick the raw JSON in the ServiceError.Details field so the information isn't lost.
+  - Also add the raw HTTP response to the DetailedResponse.
+- Removed superfluous wrapping of response error in azure.DoRetryWithRegistration().
+
+## v10.11.2
+
+### Bug Fixes
+
+- Validation for integers handles int and int64 types.
+
+## v10.11.1
+
+### Bug Fixes
+
+- Adding User information to authorization config as parsed from CLI cache.
+
+## v10.11.0
+
+### New Features
+
+- Added NewServicePrincipalTokenFromManualTokenSecret for creating a new SPT using a manual token and secret
+- Added method ServicePrincipalToken.MarshalTokenJSON() to marshal the inner Token
+
+## v10.10.0
+
+### New Features
+
+- Most ServicePrincipalTokens can now be marshalled/unmarshalled to/from JSON (ServicePrincipalCertificateSecret and ServicePrincipalMSISecret are not supported).
+- Added method ServicePrincipalToken.SetRefreshCallbacks().
+
+## v10.9.2
+
+### Bug Fixes
+
+- Refreshing a refresh token obtained from a web app authorization code now works.
+
+## v10.9.1
+
+### Bug Fixes
+
+- The retry logic for MSI token requests now uses exponential backoff per the guidelines.
+- IsTemporaryNetworkError() will return true for errors that don't implement the net.Error interface.
+
+## v10.9.0
+
+### Deprecated Methods
+
+|                  Old Method |          New Method           |
+| --------------------------: | :---------------------------: |
+|           azure.NewFuture() | azure.NewFutureFromResponse() |
+|  Future.WaitForCompletion() | Future.WaitForCompletionRef() |
+
+### New Features
+
+- Added azure.NewFutureFromResponse() for creating a Future from the initial response from an async operation.
+- Added Future.GetResult() for making the final GET call to retrieve the result from an async operation.
+
+### Bug Fixes
+
+- Some futures failed to return their results; this should now be fixed.
+
+## v10.8.2
+
+### Bug Fixes
+
+- Add nil guard to token retry logic.
+
+## v10.8.1
+
+### Bug Fixes
+
+- Return a TokenRefreshError if the sender fails on the initial request.
+- Don't retry on non-temporary network errors.
+
+## v10.8.0
+
+- Added NewAuthorizerFromEnvironmentWithResource() helper function.
+
+## v10.7.0
+
+### New Features
+
+- Added \*WithContext() methods to ADAL token refresh operations.
+
+## v10.6.2
+
+- Fixed a bug in device authentication.
+
+## v10.6.1
+
+- Added retries to the MSI token get request.
+
+## v10.6.0
+
+- Changed MSI token implementation. Now, the token endpoint is the IMDS endpoint.
+
+## v10.5.1
+
+### Bug Fixes
+
+- `DeviceFlowConfig.Authorizer()` now prints the device code message when running `go test`. The `-v` flag is required.
+
+## v10.5.0
+
+### New Features
+
+- Added NewPollingRequestWithContext() for use with polling asynchronous operations.
+
+### Bug Fixes
+
+- Make retry logic use the request's context instead of the deprecated Cancel object.
+
+## v10.4.0
+
+### New Features
+
+- Added helper for parsing Azure Resource IDs.
+- Added deprecation message to utils.GetEnvVarOrExit().
+
+## v10.3.0
+
+### New Features
+
+- Added EnvironmentFromURL method to load an Environment from a given URL. This function is particularly useful in private and hybrid cloud models, where one may define their own endpoints.
+- Added TokenAudience endpoint to the Environment structure. This is useful in private and hybrid cloud models where the TokenAudience endpoint can be different from the ResourceManagerEndpoint.
+
+## v10.2.0
+
+### New Features
+
+- Added endpoints for batch management.
+
+## v10.1.3
+
+### Bug Fixes
+
+- In Client.Do() invoke WithInspection() last so that it will inspect WithAuthorization().
+- Fixed authorization methods to invoke p.Prepare() first, aligning them with the other preparers.
+
+## v10.1.2
+
+- Corrected comment for auth.NewAuthorizerFromFile() function.
+
+## v10.1.1
+
+- Updated version number to match current release.
+
+## v10.1.0
+
+### New Features
+
+- Expose the polling URL for futures.
+
+### Bug Fixes
+
+- Add validation.NewErrorWithValidationError back to prevent breaking changes (it is deprecated).
+
+## v10.0.0
+
+### New Features
+
+- Added target and innererror fields to ServiceError to comply with the OData v4 spec.
+- The Done() method on futures will now return a ServiceError object when available (it used to return a partial value of such errors).
+- Added helper methods for obtaining authorizers.
+- Expose the polling URL for futures.
+
+### Bug Fixes
+
+- Switched from glide to dep for dependency management.
+- Fixed unmarshaling of ServiceError for JSON bodies that don't conform to the OData spec.
+- Fixed a race condition in token refresh.
+
+### Breaking Changes
+
+- The ServiceError.Details field type has been changed to match the OData v4 spec.
+- Go v1.7 has been dropped from CI.
+- API parameter validation failures will now return a unique error type validation.Error.
+- The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race).
+
+## v9.10.0
+
+- Fix the Service Bus suffix in the Azure public environment.
+- Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control)
+
+## v9.9.0
+
+### New Features
+
+- Added EventGridKeyAuthorizer for key authorization with event grid topics.
+
+### Bug Fixes
+
+- Fixed race condition when auto-refreshing service principal tokens.
+
+## v9.8.1
+
+### Bug Fixes
+
+- Added http.StatusNoContent (204) to the list of expected status codes for long-running operations.
+- Updated runtime version info so it's current.
+
+## v9.8.0
+
+### New Features
+
+- Added type azure.AsyncOpIncompleteError to be returned from a future's Result() method when the operation has not completed.
+
+## v9.7.1
+
+### Bug Fixes
+
+- Use correct AAD and Graph endpoints for US Gov environment.
+
+## v9.7.0
+
+### New Features
+
+- Added support for application/octet-stream MIME types.
+
+## v9.6.1
+
+### Bug Fixes
+
+- Ensure Authorization header is added to request when polling for registration status.
+
+## v9.6.0
+
+### New Features
+
+- Added support for acquiring tokens via MSI with a user assigned identity.
+
+## v9.5.3
+
+### Bug Fixes
+
+- Don't remove encoding of existing URL Query parameters when calling autorest.WithQueryParameters.
+- Set correct Content Type when using autorest.WithFormData.
+
+## v9.5.2
+
+### Bug Fixes
+
+- Check for nil \*http.Response before dereferencing it.
+
+## v9.5.1
+
+### Bug Fixes
+
+- Don't count http.StatusTooManyRequests (429) against the retry cap.
+- Use retry logic when SkipResourceProviderRegistration is set to true.
+
+## v9.5.0
+
+### New Features
+
+- Added support for username + password, API key, authorization code and cognitive services authentication.
+- Added field SkipResourceProviderRegistration to clients to provide a way to skip auto-registration of RPs.
+- Added utility function AsStringSlice() to convert its parameters to a string slice.
+
+### Bug Fixes
+
+- When checking for authentication failures look at the error type not the status code as it could vary.
+
+## v9.4.2
+
+### Bug Fixes
+
+- Validate parameters when creating credentials.
+- Don't retry requests if the returned status is a 401 (http.StatusUnauthorized) as it will never succeed.
+
+## v9.4.1
+
+### Bug Fixes
+
+- Update the AccessTokensPath() to read the access tokens path through AZURE_ACCESS_TOKEN_FILE. If this
+  environment variable is not set, it will fall back to the default path set by the Azure CLI.
+- Use case-insensitive string comparison for polling states.
+
+## v9.4.0
+
+### New Features
+
+- Added WaitForCompletion() to Future as a default polling implementation.
+
+### Bug Fixes
+
+- Method Future.Done() shouldn't update polling status for unexpected HTTP status codes.
+
+## v9.3.1
+
+### Bug Fixes
+
+- DoRetryForStatusCodes will retry if sender.Do returns a non-nil error.
+
+## v9.3.0
+
+### New Features
+
+- Added PollingMethod() to Future so callers know what kind of polling mechanism is used.
+- Added azure.ChangeToGet() which transforms an http.Request into a GET (to be used with LROs).
+
+## v9.2.0
+
+### New Features
+
+- Added support for custom Azure Stack endpoints.
+- Added type azure.Future used to track the status of long-running operations.
+
+### Bug Fixes
+
+- Preserve the original error in DoRetryWithRegistration when registration fails.
+
+## v9.1.1
+
+- Fixes a bug regarding the cookie jar on `autorest.Client.Sender`.
+
+## v9.1.0
+
+### New Features
+
+- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error.
+- Support for loading Azure CLI Authentication files.
+- Automatically register your subscription with the Azure Resource Provider if it hadn't been previously.
+
+### Bug Fixes
+
+- RetriableRequest can now tolerate a ReadSeekable body being read but not reset.
+- Adding missing Apache Headers
+
+## v9.0.0
+
+> **IMPORTANT:** This release was initially labeled incorrectly as `v8.4.0`. From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We apologize for any inconvenience this causes.
+
+Adding MSI Endpoint Support and CLI token rehydration.
+
+## v8.3.1
+
+Pick up bug fix in adal for MSI support.
+
+## v8.3.0
+
+Updates to Error string formats for clarity. Also, adding a copy of the http.Response to errors for an improved debugging experience.
+
+## v8.2.0
+
+### New Features
+
+- Add support for bearer authentication callbacks
+- Support 429 response codes that include "Retry-After" header
+- Support validation constraint "Pattern" for map keys
+
+### Bug Fixes
+
+- Make RetriableRequest work with multiple versions of Go
+
+## v8.1.1
+
+Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8.
+
+## v8.1.0
+
+Adds RetriableRequest type for more efficient handling of retrying HTTP requests.
+
+## v8.0.0
+
+ADAL refactored into its own package.
+Support for UNIX time.
+
+## v7.3.1
+
+- Version testing is now removed from production bits that are shipped with the library.
+
+## v7.3.0
+
+- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations
+  to acknowledge that they do not need either the entire or a trailing portion
+  of a response body. In doing so, Go's http library can reuse HTTP
+  connections more readily (see the sketch after the v7.0.5 entry below).
+- Adding `PrepareDecorator` to target custom BaseURLs.
+- Adding ACR suffix to public cloud environment.
+- Updating Glide dependencies.
+
+## v7.2.5
+
+- Fixed the Active Directory endpoint for the China cloud.
+- Removes UTF-8 BOM if present in response payload.
+- Added telemetry.
+
+## v7.2.3
+
+- Fixed a bug in calls to `DelayForBackoff` that caused doubling of delay
+  duration.
+
+## v7.2.2
+
+- autorest/azure: added ASM and ARM VM DNS suffixes.
+
+## v7.2.1
+
+- Fixed parsing of UTC times that are not RFC3339 conformant.
+
+## v7.2.0
+
+- autorest/validation: Reformat validation error for better error message.
+
+## v7.1.0
+
+- preparer: Added support for multipart formdata - WithMultiPartFormdata()
+- preparer: Added support for sending a file in the request body - WithFile
+- client: Added RetryDuration parameter.
+- autorest/validation: new package for validation code for Azure Go SDK.
+
+## v7.0.7
+
+- Add trailing / to endpoint
+- azure: add EnvironmentFromName
+
+## v7.0.6
+
+- Add retry logic for 408, 500, 502, 503 and 504 status codes.
+- Change URL path and query encoding logic.
+- Fix DelayForBackoff for proper exponential delay.
+- Add CookieJar in Client.
+
+## v7.0.5
+
+- Add check to start polling only when status is in [200,201,202].
+- Refactoring for unchecked errors.
+- azure/persist changes.
+- Fix 'file in use' issue in renewing token in deviceflow.
+- Store header RetryAfter for subsequent requests in polling.
+- Add attribute details in service error.
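+
+A minimal sketch of the `ByDiscardingBody` responder described under v7.3.0 above; the `Respond` chain shown is an assumption based on the decorator names in this changelog.
+
+```go
+package main
+
+import (
+	"net/http"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+func main() {
+	resp, err := http.Get("https://example.com") // any *http.Response source works here
+	if err != nil {
+		panic(err)
+	}
+	// Drain and close the body so the underlying connection can be reused
+	// even though the payload itself is never read by the caller.
+	if err := autorest.Respond(resp,
+		autorest.ByDiscardingBody(),
+		autorest.ByClosing()); err != nil {
+		panic(err)
+	}
+}
+```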
+
+## v7.0.4
+
+- Better error messages for long running operation failures
+
+## v7.0.3
+
+- Corrected DoPollForAsynchronous to properly handle the initial response
+
+## v7.0.2
+
+- Corrected DoPollForAsynchronous to continue using the polling method first discovered
+
+## v7.0.1
+
+- Fixed empty JSON input error in ByUnmarshallingJSON
+- Fixed polling support for GET calls
+- Changed format name from TimeRfc1123 to TimeRFC1123
+
+## v7.0.0
+
+- Added ByCopying responder with supporting TeeReadCloser
+- Rewrote Azure asynchronous handling
+- Reverted to only unmarshalling JSON
+- Corrected handling of RFC3339 time strings and added support for Rfc1123 time format
+
+The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. Since
+`encoding/json` successfully deserializes all core types, and extended types normally provide
+their custom JSON serialization handlers, the code has been reverted back to using
+`json.Unmarshal`. The original change to use `json.Decode` was made to reduce duplicate
+code; there is no loss of function, and there is a gain in accuracy, by reverting.
+
+Additionally, Azure services indicate requests to be polled by multiple means. The existing code
+only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header).
+The new code correctly covers all cases and aligns with the other Azure SDKs.
+
+## v6.1.0
+
+- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values.
+
+## v6.0.0
+
+- Completely reworked the handling of polled and asynchronous requests
+- Removed unnecessary routines
+- Reworked `mocks.Sender` to replay a series of `http.Response` objects
+- Added `PrepareDecorators` for primitive types (e.g., bool, int32)
+
+Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead new
+`SendDecorators` implement different styles of polled behavior. See `autorest.DoPollForStatusCodes`
+and `azure.DoPollForAsynchronous` for examples.
+
+## v5.0.0
+
+- Added new RespondDecorators unmarshalling primitive types
+- Corrected application of inspection and authorization PrependDecorators
+
+## v4.0.0
+
+- Added support for Azure long-running operations.
+- Added cancelation support to all decorators and functions that may delay.
+- Breaking: `DelayForBackoff` now accepts a channel, which may be nil.
+
+## v3.1.0
+
+- Add support for OAuth Device Flow authorization.
+- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material.
+- Add helpers for persisting and restoring Tokens.
+- Increased code coverage in the github.com/Azure/autorest/azure package.
+
+## v3.0.0
+
+- Breaking: `NewErrorWithError` no longer takes `statusCode int`.
+- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`.
+- Breaking: `Client#Send()` no longer takes `codes ...int` argument.
+- Add: XML unmarshaling support with `ByUnmarshallingXML()`
+- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide).
+  Applications using this library should either use Glide or vendor dependencies locally some other way.
+- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors.
+- Fix: use `net/http.DefaultClient` as base client.
+- Fix: Missing inspection for polling responses added.
+- Add: CopyAndDecode helpers.
+- Improved `./autorest/to` with `[]string` helpers (see the sketch following this list).
+- Removed golint suppressions in .travis.yml.
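+
+A minimal sketch of the `./autorest/to` conversion helpers referenced above; the specific helper names are assumptions based on the package's naming convention.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/Azure/go-autorest/autorest/to"
+)
+
+func main() {
+	// Pointer helpers suit the optional (pointer-typed) fields that
+	// generated Azure models use.
+	name := to.StringPtr("example") // string -> *string
+	fmt.Println(to.String(name))    // *string -> string; nil yields ""
+
+	tags := to.StringSlicePtr([]string{"a", "b"}) // []string -> *[]string
+	fmt.Println(to.StringSlice(tags))             // *[]string -> []string
+}
+```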
+
+## v2.1.0
+
+- Added `StatusCode` to `Error` for more easily obtaining the HTTP Response StatusCode (if any)
+
+## v2.0.0
+
+- Changed `to.StringMapPtr` method signature to return a pointer
+- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificate and private keys
+
+## v1.1.1
+
+- Introduce godeps and vendor dependencies introduced in v1.1.1
+
+## v1.1.0
+
+- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT
+- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate
+
+## v1.0.1
+
+- Added CHANGELOG.md
+
+## v1.0.0
+
+- Added Logging inspectors to trace http.Request / Response
+- Added support for User-Agent header
+- Changed WithHeader PrepareDecorator to use set vs. add
+- Added JSON to error when unmarshalling fails
+- Added Client#Send method
+- Corrected case of "Azure" in package paths
+- Added "to" helpers, Azure helpers, and improved ease-of-use
+- Corrected golint issues
diff --git a/vendor/github.com/Azure/go-autorest/GNUmakefile b/vendor/github.com/Azure/go-autorest/GNUmakefile
new file mode 100644
index 000000000..a434e73ac
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/GNUmakefile
@@ -0,0 +1,23 @@
+DIR?=./autorest/
+
+default: build
+
+build: fmt
+	go install $(DIR)
+
+test:
+	go test $(DIR) || exit 1
+
+vet:
+	@echo "go vet ."
+	@go vet $(DIR)... ; if [ $$? -eq 1 ]; then \
+		echo ""; \
+		echo "Vet found suspicious constructs. Please check the reported constructs"; \
+		echo "and fix them if necessary before submitting the code for review."; \
+		exit 1; \
+	fi
+
+fmt:
+	gofmt -w $(DIR)
+
+.PHONY: build test vet fmt
diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.lock b/vendor/github.com/Azure/go-autorest/Gopkg.lock
new file mode 100644
index 000000000..dc6e3e633
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/Gopkg.lock
@@ -0,0 +1,324 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+ + +[[projects]] + digest = "1:892e39e5c083d0943f1e80ab8351690f183c6a5ab24e1d280adcad424c26255e" + name = "contrib.go.opencensus.io/exporter/ocagent" + packages = ["."] + pruneopts = "UT" + revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea" + version = "v0.6.0" + +[[projects]] + digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20" + name = "github.com/census-instrumentation/opencensus-proto" + packages = [ + "gen-go/agent/common/v1", + "gen-go/agent/metrics/v1", + "gen-go/agent/trace/v1", + "gen-go/metrics/v1", + "gen-go/resource/v1", + "gen-go/trace/v1", + ] + pruneopts = "UT" + revision = "d89fa54de508111353cb0b06403c00569be780d8" + version = "v0.2.1" + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + pruneopts = "UT" + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" + +[[projects]] + digest = "1:cf0d2e435fd4ce45b789e93ef24b5f08e86be0e9807a16beb3694e2d8c9af965" + name = "github.com/dimchansky/utfbom" + packages = ["."] + pruneopts = "UT" + revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "UT" + revision = "611e8accdfc92c4187d399e95ce826046d4c8d73" + +[[projects]] + digest = "1:e3839df32927e8d3403cd5aa7253d966e8ff80fc8f10e2e35d146461cd83fcfa" + name = "github.com/golang/protobuf" + packages = [ + "descriptor", + "jsonpb", + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/struct", + "ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "UT" + revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" + version = "v1.3.2" + +[[projects]] + digest = "1:c560cd79300fac84f124b96225181a637a70b60155919a3c36db50b7cca6b806" + name = "github.com/grpc-ecosystem/grpc-gateway" + packages = [ + "internal", + "runtime", + "utilities", + ] + pruneopts = "UT" + revision = "f7120437bb4f6c71f7f5076ad65a45310de2c009" + version = "v1.12.1" + +[[projects]] + digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" + name = "github.com/mitchellh/go-homedir" + packages = ["."] + pruneopts = "UT" + revision = "af06845cf3004701891bf4fdb884bfe4920b3727" + version = "v1.1.0" + +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "UT" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + ] + pruneopts = "UT" + revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" + version = "v1.4.0" + +[[projects]] + digest = "1:7c5e00383399fe13de0b4b65c9fdde16275407ce8ac02d867eafeaa916edcc71" + name = "go.opencensus.io" + packages = [ + ".", + "internal", + "internal/tagencoding", + "metric/metricdata", + "metric/metricproducer", + "plugin/ocgrpc", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + 
"plugin/ochttp/propagation/tracecontext", + "resource", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "UT" + revision = "aad2c527c5defcf89b5afab7f37274304195a6b2" + version = "v0.22.2" + +[[projects]] + branch = "master" + digest = "1:f604f5e2ee721b6757d962dfe7bab4f28aae50c456e39cfb2f3819762a44a6ae" + name = "golang.org/x/crypto" + packages = [ + "pkcs12", + "pkcs12/internal/rc2", + ] + pruneopts = "UT" + revision = "e9b2fee46413994441b28dfca259d911d963dfed" + +[[projects]] + branch = "master" + digest = "1:334b27eac455cb6567ea28cd424230b07b1a64334a2f861a8075ac26ce10af43" + name = "golang.org/x/lint" + packages = [ + ".", + "golint", + ] + pruneopts = "UT" + revision = "fdd1cda4f05fd1fd86124f0ef9ce31a0b72c8448" + +[[projects]] + branch = "master" + digest = "1:257a75d024975428ab9192bfc334c3490882f8cb21322ea5784ca8eca000a910" + name = "golang.org/x/net" + packages = [ + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "1ddd1de85cb0337b623b740a609d35817d516a8d" + +[[projects]] + branch = "master" + digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b" + name = "golang.org/x/sync" + packages = ["semaphore"] + pruneopts = "UT" + revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb" + +[[projects]] + branch = "master" + digest = "1:4da420ceda5f68e8d748aa2169d0ed44ffadb1bbd6537cf778a49563104189b8" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "UT" + revision = "ce4227a45e2eb77e5c847278dcc6a626742e2945" + +[[projects]] + digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/language", + "internal/language/compact", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" + +[[projects]] + branch = "master" + digest = "1:4eb5ea8395fb60212dd58b92c9db80bab59d5e99c7435f9a6a0a528c373b60e7" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "go/gcexportdata", + "go/internal/gcimporter", + "go/types/typeutil", + ] + pruneopts = "UT" + revision = "259af5ff87bdcd4abf2ecda8edc3f13f04f26a42" + +[[projects]] + digest = "1:964bb30febc27fabfbec4759fa530c6ec35e77a7c85fed90b9317ea39a054877" + name = "google.golang.org/api" + packages = ["support/bundler"] + pruneopts = "UT" + revision = "8a410c21381766a810817fd6200fce8838ecb277" + version = "v0.14.0" + +[[projects]] + branch = "master" + digest = "1:a8d5c2c6e746b3485e36908ab2a9e3d77b86b81f8156d88403c7d2b462431dfd" + name = "google.golang.org/genproto" + packages = [ + "googleapis/api/httpbody", + "googleapis/rpc/status", + "protobuf/field_mask", + ] + pruneopts = "UT" + revision = "51378566eb590fa106d1025ea12835a4416dda84" + +[[projects]] + digest = "1:b59ce3ddb11daeeccccc9cb3183b58ebf8e9a779f1c853308cd91612e817a301" + name = "google.golang.org/grpc" + packages = [ + ".", + "backoff", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/balancerload", 
+ "internal/binarylog", + "internal/buffer", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/resolver/dns", + "internal/resolver/passthrough", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "serviceconfig", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "1a3960e4bd028ac0cec0a2afd27d7d8e67c11514" + version = "v1.25.1" + +[[projects]] + digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce" + version = "v2.2.7" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "contrib.go.opencensus.io/exporter/ocagent", + "github.com/dgrijalva/jwt-go", + "github.com/dimchansky/utfbom", + "github.com/mitchellh/go-homedir", + "github.com/stretchr/testify/require", + "go.opencensus.io/plugin/ochttp", + "go.opencensus.io/plugin/ochttp/propagation/tracecontext", + "go.opencensus.io/stats/view", + "go.opencensus.io/trace", + "golang.org/x/crypto/pkcs12", + "golang.org/x/lint/golint", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.toml b/vendor/github.com/Azure/go-autorest/Gopkg.toml new file mode 100644 index 000000000..1fc286596 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/Gopkg.toml @@ -0,0 +1,59 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + +required = ["golang.org/x/lint/golint"] + +[prune] + go-tests = true + unused-packages = true + +[[constraint]] + name = "contrib.go.opencensus.io/exporter/ocagent" + version = "0.6.0" + +[[constraint]] + name = "github.com/dgrijalva/jwt-go" + version = "3.2.0" + +[[constraint]] + name = "github.com/dimchansky/utfbom" + version = "1.1.0" + +[[constraint]] + name = "github.com/mitchellh/go-homedir" + version = "1.1.0" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.3.0" + +[[constraint]] + name = "go.opencensus.io" + version = "0.22.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/crypto" diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/README.md b/vendor/github.com/Azure/go-autorest/README.md new file mode 100644 index 000000000..de1e19a44 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/README.md @@ -0,0 +1,165 @@ +# go-autorest + +[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/Azure.go-autorest?branchName=master)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=625&branchName=master) +[![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest) + +Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages. + +An authentication client tested with Azure Active Directory (AAD) is also +provided in this repo in the package +`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package +is maintained only as part of the Azure Go SDK and is not related to other +"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD). + +## Overview + +Package go-autorest implements an HTTP request pipeline suitable for use across +multiple goroutines and provides the shared routines used by packages generated +by [Autorest](https://github.com/Azure/autorest.go). + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + +```go + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) +``` + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + +```go + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) +``` + +will set the URL to: + +``` + https://microsoft.com/a/b/c +``` + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., `ByUnmarshallingJson`) is likely incorrect. + +Errors raised by autorest objects and methods will conform to the `autorest.Error` interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. 
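+As a minimal sketch of inspecting such an error (an illustration only; it assumes the `autorest.Error` accessors `PackageType`, `Method`, `StatusCode`, and `Message`):
+
+```go
+    _, err := Send(req)
+    if err != nil {
+        // A decorated error conforms to autorest.Error and reports the
+        // failing package, method, and HTTP status code (when available).
+        if azErr, ok := err.(autorest.Error); ok {
+            log.Printf("%s#%s failed with status %v: %s",
+                azErr.PackageType(), azErr.Method(), azErr.StatusCode(), azErr.Message())
+        }
+    }
+```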
+ +## Helpers + +### Handling Swagger Dates + +The Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct +parsing and formatting. + +### Handling Empty Values + +In JSON, missing values have different semantics than empty values. This is especially true for +services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains +only those values to modify. Missing values are to be left unchanged. Developers, then, require a +means to both specify an empty value and to leave the value out of the submitted JSON. + +The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits +empty values from the rendered JSON. Since Go defines default values for all base types (such as "" +for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package +treats default values as meaning empty, omitting them from the rendered JSON. This means that, using +the Go base types encoded through the default JSON package, it is not possible to create JSON to +clear a value at the server. + +The workaround within the Go community is to use pointers to base types in lieu of base types within +structures that map to JSON. For example, instead of a value of type `string`, the workaround uses +`*string`. While this enables distinguishing empty values from those to be unchanged, creating +pointers to a base type (notably constant, in-line values) requires additional variables. This, for +example, + +```go + s := struct { + S *string + }{ S: &"foo" } +``` +fails, while this + +```go + v := "foo" + s := struct { + S *string + }{ S: &v } +``` +succeeds. + +To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for +Go base types which have Swagger analogs. It also provides a helper that converts between +`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value +associated with a key should be cleared. With the helpers, the previous example becomes + +```go + s := struct { + S *string + }{ S: to.StringPtr("foo") } +``` + +(A short, self-contained sketch using these helpers appears at the end of this README.) + +## Install + +```bash +go get github.com/Azure/go-autorest/autorest +go get github.com/Azure/go-autorest/autorest/azure +go get github.com/Azure/go-autorest/autorest/date +go get github.com/Azure/go-autorest/autorest/to +``` + +### Using with Go Modules +In [v12.0.1](https://github.com/Azure/go-autorest/pull/386), this repository introduced the following modules. + +- autorest/adal +- autorest/azure/auth +- autorest/azure/cli +- autorest/date +- autorest/mocks +- autorest/to +- autorest/validation +- autorest +- logger +- tracing + +Tagging cumulative SDK releases as a whole (e.g. `v12.3.0`) is still enabled to support consumers of this repo that have not yet migrated to modules. + +## License + +See LICENSE file. + +----- + +This project has adopted the [Microsoft Open Source Code of +Conduct](https://opensource.microsoft.com/codeofconduct/). For more information +see the [Code of Conduct +FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact +[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional +questions or comments.
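+As promised above, a small, self-contained sketch of the `date` and `to` helpers (a sketch only; it assumes `date.ParseDate` from `autorest/date` and `to.StringPtr` from `autorest/to`):
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/Azure/go-autorest/autorest/date"
+    "github.com/Azure/go-autorest/autorest/to"
+)
+
+func main() {
+    // Parse a Swagger "date" (a full date with no time component).
+    d, err := date.ParseDate("2018-01-01")
+    if err != nil {
+        panic(err)
+    }
+    fmt.Println(d) // 2018-01-01
+
+    // Use a pointer-typed field so an explicitly empty value survives
+    // JSON marshaling instead of being treated as "unset".
+    payload := struct {
+        S *string `json:"s"`
+    }{S: to.StringPtr("")}
+    fmt.Println(*payload.S == "") // true
+}
+```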
diff --git a/vendor/github.com/Azure/go-autorest/autorest/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md new file mode 100644 index 000000000..97434ea7f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -0,0 +1,294 @@ +# NOTE: This module will go out of support by March 31, 2023. For authenticating with Azure AD, use module [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) instead. For help migrating from `adal` to `azidentity`, please consult the [migration guide](https://aka.ms/azsdk/go/identity/migration). General information about the retirement of this and other legacy modules can be found [here](https://azure.microsoft.com/updates/support-for-azure-sdk-libraries-that-do-not-conform-to-our-current-azure-sdk-guidelines-will-be-retired-as-of-31-march-2023/). + +# Azure Active Directory authentication for Go + +This is a standalone package for authenticating with Azure Active +Directory from other Go libraries and applications, in particular the [Azure SDK +for Go](https://github.com/Azure/azure-sdk-for-go). + +Note: Despite the package's name it is not related to other "ADAL" libraries +maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues +should be opened in [this repo's](https://github.com/Azure/go-autorest/issues) +or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue +trackers. + +## Install + +```bash +go get -u github.com/Azure/go-autorest/autorest/adal +``` + +## Usage + +An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). + +### Register an Azure AD Application with secret + + +1. Register a new application with a `secret` credential + + ``` + az ad app create \ + --display-name example-app \ + --homepage https://example-app/home \ + --identifier-uris https://example-app/app \ + --password secret + ``` + +2. Create a service principal using the `Application ID` from the previous step + + ``` + az ad sp create --id "Application ID" + ``` + + * Replace `Application ID` with `appId` from step 1. + +### Register an Azure AD Application with certificate + +1. Create a private key + + ``` + openssl genrsa -out "example-app.key" 2048 + ``` + +2. Create the certificate + + ``` + openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr" + openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000 + ``` + +3. Create the PKCS12 version of the certificate, which also contains the private key + + ``` + openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass: + + ``` + +4. Register a new application with the certificate content from `example-app.crt` + + ``` + certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)" + + az ad app create \ + --display-name example-app \ + --homepage https://example-app/home \ + --identifier-uris https://example-app/app \ + --key-usage Verify --end-date 2018-01-01 \ + --key-value "${certificateContents}" + ``` + +5. Create a service principal using the `Application ID` from the previous step + + ``` + az ad sp create --id "APPLICATION_ID" + ``` + + * Replace `APPLICATION_ID` with `appId` from step 4.
+ + +### Grant the necessary permissions + +Azure relies on a Role-Based Access Control (RBAC) model to manage access to resources at a fine-grained +level. There is a set of [pre-defined roles](https://docs.microsoft.com/azure/active-directory/role-based-access-built-in-roles) +which can be assigned to a service principal of an Azure AD application depending on your needs. + +``` +az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME" +``` + +* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step. +* Replace the `ROLE_NAME` with a role name of your choice. + +It is also possible to define custom role definitions. + +``` +az role definition create --role-definition role-definition.json +``` + +* Check [custom roles](https://docs.microsoft.com/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file. + + +### Acquire Access Token + +The common configuration used by all flows: + +```Go +const activeDirectoryEndpoint = "https://login.microsoftonline.com/" +tenantID := "TENANT_ID" +oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) + +applicationID := "APPLICATION_ID" + +callback := func(token adal.Token) error { + // This is called after the token is acquired + return nil +} + +// The resource for which the token is acquired +resource := "https://management.core.windows.net/" +``` + +* Replace the `TENANT_ID` with your tenant ID. +* Replace the `APPLICATION_ID` with the value from the previous section. + +#### Client Credentials + +```Go +applicationSecret := "APPLICATION_SECRET" + +spt, err := adal.NewServicePrincipalToken( + *oauthConfig, + applicationID, + applicationSecret, + resource, + callbacks...) +if err != nil { + return nil, err +} + +// Acquire a new access token +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +* Replace the `APPLICATION_SECRET` with the `password` value from the previous section. + +#### Client Certificate + +```Go +certificatePath := "./example-app.pfx" + +certData, err := os.ReadFile(certificatePath) +if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) +} + +// Get the certificate and private key from the pfx file +certificate, rsaPrivateKey, err := decodePkcs12(certData, "") +if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) +} + +spt, err := adal.NewServicePrincipalTokenFromCertificate( + *oauthConfig, + applicationID, + certificate, + rsaPrivateKey, + resource, + callbacks...) + +// Acquire a new access token +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +* Update the certificate path to point to the example-app.pfx file which was created in the previous section. + + +#### Device Code + +```Go +oauthClient := &http.Client{} + +// Acquire the device code +deviceCode, err := adal.InitiateDeviceAuth( + oauthClient, + *oauthConfig, + applicationID, + resource) +if err != nil { + return nil, fmt.Errorf("Failed to start device auth flow: %s", err) +} + +// Display the authentication message +fmt.Println(*deviceCode.Message) + +// Wait here until the user is authenticated +token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) +if err != nil { + return nil, fmt.Errorf("Failed to finish device auth flow: %s", err) +} + +spt, err := adal.NewServicePrincipalTokenFromManualToken( + *oauthConfig, + applicationID, + resource, + *token, + callbacks...)
+ +if (err == nil) { + token := spt.Token +} +``` + +#### Username and password authentication + +```Go +spt, err := adal.NewServicePrincipalTokenFromUsernamePassword( + *oauthConfig, + applicationID, + username, + password, + resource, + callbacks...) + +if (err == nil) { + token := spt.Token +} +``` + +#### Authorization code authentication + +```Go +spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode( + *oauthConfig, + applicationID, + clientSecret, + authorizationCode, + redirectURI, + resource, + callbacks...) + +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +### Command Line Tool + +A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above. + +``` +adal -h + +Usage of ./adal: + -applicationId string + application id + -certificatePath string + path to pk12/PFX application certificate + -mode string + authentication mode (device, secret, cert, refresh) (default "device") + -resource string + resource for which the token is requested + -secret string + application secret + -tenantId string + tenant id + -tokenCachePath string + location of oauth token cache (default "/home/cgc/.adal/accessToken.json") +``` + +Example: acquire a token for `https://management.core.windows.net/` using the device code flow: + +``` +adal -mode device \ + -applicationId "APPLICATION_ID" \ + -tenantId "TENANT_ID" \ + -resource https://management.core.windows.net/ + +``` diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go new file mode 100644 index 000000000..fa5964742 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -0,0 +1,151 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "fmt" + "net/url" +) + +const ( + activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" +) + +// OAuthConfig represents the endpoints needed +// in OAuth operations +type OAuthConfig struct { + AuthorityEndpoint url.URL `json:"authorityEndpoint"` + AuthorizeEndpoint url.URL `json:"authorizeEndpoint"` + TokenEndpoint url.URL `json:"tokenEndpoint"` + DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"` +} + +// IsZero returns true if the OAuthConfig object is zero-initialized. +func (oac OAuthConfig) IsZero() bool { + return oac == OAuthConfig{} +} + +func validateStringParam(param, name string) error { + if len(param) == 0 { + return fmt.Errorf("parameter '" + name + "' cannot be empty") + } + return nil +} + +// NewOAuthConfig returns an OAuthConfig with tenant specific urls +func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + apiVer := "1.0" + return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer) +} + +// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls.
+// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value. +func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) { + if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { + return nil, err + } + api := "" + // it's legal for tenantID to be empty so don't validate it + if apiVersion != nil { + if err := validateStringParam(*apiVersion, "apiVersion"); err != nil { + return nil, err + } + api = fmt.Sprintf("?api-version=%s", *apiVersion) + } + u, err := url.Parse(activeDirectoryEndpoint) + if err != nil { + return nil, err + } + authorityURL, err := u.Parse(tenantID) + if err != nil { + return nil, err + } + authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api)) + if err != nil { + return nil, err + } + tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api)) + if err != nil { + return nil, err + } + deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api)) + if err != nil { + return nil, err + } + + return &OAuthConfig{ + AuthorityEndpoint: *authorityURL, + AuthorizeEndpoint: *authorizeURL, + TokenEndpoint: *tokenURL, + DeviceCodeEndpoint: *deviceCodeURL, + }, nil +} + +// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs. +type MultiTenantOAuthConfig interface { + PrimaryTenant() *OAuthConfig + AuxiliaryTenants() []*OAuthConfig +} + +// OAuthOptions contains optional OAuthConfig creation arguments. +type OAuthOptions struct { + APIVersion string +} + +func (c OAuthOptions) apiVersion() string { + if c.APIVersion != "" { + return fmt.Sprintf("?api-version=%s", c.APIVersion) + } + return "1.0" +} + +// NewMultiTenantOAuthConfig creates an object that supports multitenant OAuth configuration. +// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
+func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) { + if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 { + return nil, errors.New("must specify one to three auxiliary tenants") + } + mtCfg := multiTenantOAuthConfig{ + cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1), + } + apiVer := options.apiVersion() + pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err) + } + mtCfg.cfgs[0] = pri + for i := range auxiliaryTenantIDs { + aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i]) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err) + } + mtCfg.cfgs[i+1] = aux + } + return mtCfg, nil +} + +type multiTenantOAuthConfig struct { + // first config in the slice is the primary tenant + cfgs []*OAuthConfig +} + +func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig { + return m.cfgs[0] +} + +func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig { + return m.cfgs[1:] +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go new file mode 100644 index 000000000..f040e2ac6 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -0,0 +1,273 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/* + This file is largely based on rjw57/oauth2device's code, with the following differences: + * scope -> resource, and only allow a single one + * receive "Message" in the DeviceCode struct and show it to users as the prompt + * azure-xplat-cli has the following behavior that this emulates: + - does not send client_secret during the token exchange + - sends resource again in the token exchange request +*/ + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" +) + +const ( + logPrefix = "autorest/adal/devicetoken:" +) + +var ( + // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow + ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) + + // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow + ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) + + // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow + ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) + + // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow + ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) + + // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow + ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) + + // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow + ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) + + // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow + ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) + + errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" + errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" + errTokenSendingFails = "Error occurred while sending request with device code for a token" + errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" + errStatusNotOK = "Error HTTP status != 200" +) + +// DeviceCode is the object returned by the device auth endpoint +// It contains information to instruct the user to complete the auth flow +type DeviceCode struct { + DeviceCode *string `json:"device_code,omitempty"` + UserCode *string `json:"user_code,omitempty"` + VerificationURL *string `json:"verification_url,omitempty"` + ExpiresIn *int64 `json:"expires_in,string,omitempty"` + Interval *int64 `json:"interval,string,omitempty"` + + Message *string `json:"message"` // Azure specific + Resource string // store the following, stored when initiating, used when exchanging + OAuthConfig OAuthConfig + ClientID string +} + +// TokenError is the object returned by the token exchange endpoint +// when something is amiss +type TokenError struct { + Error *string `json:"error,omitempty"` + ErrorCodes []int `json:"error_codes,omitempty"` + ErrorDescription *string `json:"error_description,omitempty"` + Timestamp *string `json:"timestamp,omitempty"` + TraceID *string `json:"trace_id,omitempty"` +} + +// deviceToken is the object returned by the token exchange endpoint
+// It can either look like a Token or a TokenError, so put both here +// and check for presence of "Error" to know if we are in error state +type deviceToken struct { + Token + TokenError +} + +// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +// Deprecated: use InitiateDeviceAuthWithContext() instead. +func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + return InitiateDeviceAuthWithContext(context.Background(), sender, oauthConfig, clientID, resource) +} + +// InitiateDeviceAuthWithContext initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + v := url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + } + + s := v.Encode() + body := io.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) + } + + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrDeviceCodeEmpty + } + + var code DeviceCode + err = json.Unmarshal(rb, &code) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + code.ClientID = clientID + code.Resource = resource + code.OAuthConfig = oauthConfig + + return &code, nil +} + +// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +// Deprecated: use CheckForUserCompletionWithContext() instead.
+func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + return CheckForUserCompletionWithContext(context.Background(), sender, code) +} + +// CheckForUserCompletionWithContext takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { + v := url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + } + + s := v.Encode() + body := io.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrOAuthTokenEmpty + } + + var token deviceToken + err = json.Unmarshal(rb, &token) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if token.Error == nil { + return &token.Token, nil + } + + switch *token.Error { + case "authorization_pending": + return nil, ErrDeviceAuthorizationPending + case "slow_down": + return nil, ErrDeviceSlowDown + case "access_denied": + return nil, ErrDeviceAccessDenied + case "code_expired": + return nil, ErrDeviceCodeExpired + default: + // return a more meaningful error message if available + if token.ErrorDescription != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, *token.Error, *token.ErrorDescription) + } + return nil, ErrDeviceGeneric + } +} + +// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. +// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +// Deprecated: use WaitForUserCompletionWithContext() instead. +func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + return WaitForUserCompletionWithContext(context.Background(), sender, code) +} + +// WaitForUserCompletionWithContext calls CheckForUserCompletion repeatedly until a token is granted or an error +// state occurs. This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. 
+func WaitForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletionWithContext(ctx, sender, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) + } + + select { + case <-time.After(waitDuration): + // noop + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go new file mode 100644 index 000000000..647a61bb8 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go @@ -0,0 +1,25 @@ +//go:build modhack +// +build modhack + +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go new file mode 100644 index 000000000..fb54a4323 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -0,0 +1,134 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + + "golang.org/x/crypto/pkcs12" +) + +var ( + // ErrMissingCertificate is returned when no local certificate is found in the provided PFX data. + ErrMissingCertificate = errors.New("adal: certificate missing") + + // ErrMissingPrivateKey is returned when no private key is found in the provided PFX data. 
+	ErrMissingPrivateKey = errors.New("adal: private key missing")
+)
+
+// LoadToken restores a Token object from a file located at 'path'.
+func LoadToken(path string) (*Token, error) {
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
+	}
+	defer file.Close()
+
+	var token Token
+
+	dec := json.NewDecoder(file)
+	if err = dec.Decode(&token); err != nil {
+		return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err)
+	}
+	return &token, nil
+}
+
+// SaveToken persists an OAuth token at the given location on disk.
+// It moves the new file into place so it can safely be used to replace an existing file
+// that may be accessed by multiple processes.
+func SaveToken(path string, mode os.FileMode, token Token) error {
+	dir := filepath.Dir(path)
+	err := os.MkdirAll(dir, os.ModePerm)
+	if err != nil {
+		return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
+	}
+
+	newFile, err := os.CreateTemp(dir, "token")
+	if err != nil {
+		return fmt.Errorf("failed to create the temp file to write the token: %v", err)
+	}
+	tempPath := newFile.Name()
+
+	if err := json.NewEncoder(newFile).Encode(token); err != nil {
+		return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
+	}
+	if err := newFile.Close(); err != nil {
+		return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
+	}
+
+	// Atomic replace to avoid multi-writer file corruptions
+	if err := os.Rename(tempPath, path); err != nil {
+		return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
+	}
+	if err := os.Chmod(path, mode); err != nil {
+		return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
+	}
+	return nil
+}
+
+// DecodePfxCertificateData extracts the x509 certificate and RSA private key from the provided PFX data.
+// The PFX data must contain a private key along with a certificate whose public key matches that of the
+// private key, or an error is returned.
+// If the private key is not password-protected, pass the empty string for password.
+func DecodePfxCertificateData(pfxData []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
+	blocks, err := pkcs12.ToPEM(pfxData, password)
+	if err != nil {
+		return nil, nil, err
+	}
+	// first extract the private key
+	var priv *rsa.PrivateKey
+	for _, block := range blocks {
+		if block.Type == "PRIVATE KEY" {
+			priv, err = x509.ParsePKCS1PrivateKey(block.Bytes)
+			if err != nil {
+				return nil, nil, err
+			}
+			break
+		}
+	}
+	if priv == nil {
+		return nil, nil, ErrMissingPrivateKey
+	}
+	// now find the certificate with the matching public key of our private key
+	var cert *x509.Certificate
+	for _, block := range blocks {
+		if block.Type == "CERTIFICATE" {
+			pcert, err := x509.ParseCertificate(block.Bytes)
+			if err != nil {
+				return nil, nil, err
+			}
+			certKey, ok := pcert.PublicKey.(*rsa.PublicKey)
+			if !ok {
+				// keep looking
+				continue
+			}
+			if priv.E == certKey.E && priv.N.Cmp(certKey.N) == 0 {
+				// found a match
+				cert = pcert
+				break
+			}
+		}
+	}
+	if cert == nil {
+		return nil, nil, ErrMissingCertificate
+	}
+	return cert, priv, nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
new file mode 100644
index 000000000..eb649bce9
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
@@ -0,0 +1,101 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"crypto/tls"
+	"net"
+	"net/http"
+	"net/http/cookiejar"
+	"sync"
+	"time"
+
+	"github.com/Azure/go-autorest/tracing"
+)
+
+const (
+	contentType      = "Content-Type"
+	mimeTypeFormPost = "application/x-www-form-urlencoded"
+)
+
+// DO NOT ACCESS THIS DIRECTLY. Go through sender().
+var defaultSender Sender
+var defaultSenderInit = &sync.Once{}
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a function type that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(sender(), decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
+
+func sender() Sender {
+	// note that we can't init defaultSender in init() since it will
+	// execute before calling code has had a chance to enable tracing
+	defaultSenderInit.Do(func() {
+		// copied from http.DefaultTransport with a TLS minimum version.
+		transport := &http.Transport{
+			Proxy: http.ProxyFromEnvironment,
+			DialContext: (&net.Dialer{
+				Timeout:   30 * time.Second,
+				KeepAlive: 30 * time.Second,
+			}).DialContext,
+			ForceAttemptHTTP2:     true,
+			MaxIdleConns:          100,
+			IdleConnTimeout:       90 * time.Second,
+			TLSHandshakeTimeout:   10 * time.Second,
+			ExpectContinueTimeout: 1 * time.Second,
+			TLSClientConfig: &tls.Config{
+				MinVersion: tls.VersionTLS12,
+			},
+		}
+		var roundTripper http.RoundTripper = transport
+		if tracing.IsEnabled() {
+			roundTripper = tracing.NewTransport(transport)
+		}
+		j, _ := cookiejar.New(nil)
+		defaultSender = &http.Client{Jar: j, Transport: roundTripper}
+	})
+	return defaultSender
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
new file mode 100644
index 000000000..67baecd83
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -0,0 +1,1429 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
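The Sender plumbing in sender.go above is the extension point the rest of adal builds on. For orientation, a minimal sketch (not part of the vendored patch) of composing a custom SendDecorator with CreateSender; the withHeader helper and the header name/value are illustrative assumptions, not vendored code:

	package main

	import (
		"fmt"
		"net/http"

		"github.com/Azure/go-autorest/autorest/adal"
	)

	// withHeader returns a SendDecorator (hypothetical helper) that sets a
	// header on each request before delegating to the wrapped Sender,
	// i.e. a "pre-decorator" in the terminology of the doc comment above.
	func withHeader(key, value string) adal.SendDecorator {
		return func(s adal.Sender) adal.Sender {
			return adal.SenderFunc(func(r *http.Request) (*http.Response, error) {
				r.Header.Set(key, value)
				return s.Do(r)
			})
		}
	}

	func main() {
		// CreateSender builds the package's default TLS 1.2+ client and
		// applies the decorators in the order given.
		s := adal.CreateSender(withHeader("X-Example", "demo"))
		req, err := http.NewRequest(http.MethodGet, "https://example.com", nil)
		if err != nil {
			panic(err)
		}
		resp, err := s.Do(req)
		if err != nil {
			fmt.Println("request failed:", err)
			return
		}
		defer resp.Body.Close()
		fmt.Println("status:", resp.Status)
	}

Because decorators wrap the Sender rather than the transport, the same pattern can be used for logging or request mutation without touching the default client constructed in sender().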
+
+import (
+	"context"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha1"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest/date"
+	"github.com/Azure/go-autorest/logger"
+	"github.com/golang-jwt/jwt/v4"
+)
+
+const (
+	defaultRefresh = 5 * time.Minute
+
+	// OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
+	OAuthGrantTypeDeviceCode = "device_code"
+
+	// OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
+	OAuthGrantTypeClientCredentials = "client_credentials"
+
+	// OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows
+	OAuthGrantTypeUserPass = "password"
+
+	// OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
+	OAuthGrantTypeRefreshToken = "refresh_token"
+
+	// OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows
+	OAuthGrantTypeAuthorizationCode = "authorization_code"
+
+	// metadataHeader is the header required by MSI extension
+	metadataHeader = "Metadata"
+
+	// msiEndpoint is the well-known endpoint for getting MSI authentication tokens
+	msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
+
+	// the API version to use for the MSI endpoint
+	msiAPIVersion = "2018-02-01"
+
+	// the default number of attempts to refresh an MSI authentication token
+	defaultMaxMSIRefreshAttempts = 5
+
+	// msiEndpointEnv is the environment variable used to store the endpoint on App Service and Functions
+	msiEndpointEnv = "MSI_ENDPOINT"
+
+	// msiSecretEnv is the environment variable used to store the request secret on App Service and Functions
+	msiSecretEnv = "MSI_SECRET"
+
+	// the API version to use for the legacy App Service MSI endpoint
+	appServiceAPIVersion2017 = "2017-09-01"
+
+	// secretHeader is the header used when authenticating against the App Service MSI endpoint
+	secretHeader = "Secret"
+
+	// the format for expires_on in UTC with AM/PM
+	expiresOnDateFormatPM = "1/2/2006 15:04:05 PM +00:00"
+
+	// the format for expires_on in UTC without AM/PM
+	expiresOnDateFormat = "1/2/2006 15:04:05 +00:00"
+)
+
+// OAuthTokenProvider is an interface which should be implemented by an access token retriever
+type OAuthTokenProvider interface {
+	OAuthToken() string
+}
+
+// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization.
+type MultitenantOAuthTokenProvider interface {
+	PrimaryOAuthToken() string
+	AuxiliaryOAuthTokens() []string
+}
+
+// TokenRefreshError is an interface used by errors returned during token refresh.
+type TokenRefreshError interface {
+	error
+	Response() *http.Response
+}
+
+// Refresher is an interface for token refresh functionality
+type Refresher interface {
+	Refresh() error
+	RefreshExchange(resource string) error
+	EnsureFresh() error
+}
+
+// RefresherWithContext is an interface for token refresh functionality
+type RefresherWithContext interface {
+	RefreshWithContext(ctx context.Context) error
+	RefreshExchangeWithContext(ctx context.Context, resource string) error
+	EnsureFreshWithContext(ctx context.Context) error
+}
+
+// TokenRefreshCallback is the type representing callbacks that will be called after
+// a successful token refresh
+type TokenRefreshCallback func(Token) error
+
+// TokenRefresh is a type representing a custom callback to refresh a token
+type TokenRefresh func(ctx context.Context, resource string) (*Token, error)
+
+// JWTCallback is the type representing the callback that will be called to get the federated OIDC JWT
+type JWTCallback func() (string, error)
+
+// Token encapsulates the access token used to authorize Azure requests.
+// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response
+type Token struct {
+	AccessToken  string `json:"access_token"`
+	RefreshToken string `json:"refresh_token"`
+
+	ExpiresIn json.Number `json:"expires_in"`
+	ExpiresOn json.Number `json:"expires_on"`
+	NotBefore json.Number `json:"not_before"`
+
+	Resource string `json:"resource"`
+	Type     string `json:"token_type"`
+}
+
+func newToken() Token {
+	return Token{
+		ExpiresIn: "0",
+		ExpiresOn: "0",
+		NotBefore: "0",
+	}
+}
+
+// IsZero returns true if the token object is zero-initialized.
+func (t Token) IsZero() bool {
+	return t == Token{}
+}
+
+// Expires returns the time.Time when the Token expires.
+func (t Token) Expires() time.Time {
+	s, err := t.ExpiresOn.Float64()
+	if err != nil {
+		s = -3600
+	}
+
+	expiration := date.NewUnixTimeFromSeconds(s)
+
+	return time.Time(expiration).UTC()
+}
+
+// IsExpired returns true if the Token is expired, false otherwise.
+func (t Token) IsExpired() bool {
+	return t.WillExpireIn(0)
+}
+
+// WillExpireIn returns true if the Token will expire after the passed time.Duration interval
+// from now, false otherwise.
+func (t Token) WillExpireIn(d time.Duration) bool {
+	return !t.Expires().After(time.Now().Add(d))
+}
+
+// OAuthToken returns the current access token
+func (t *Token) OAuthToken() string {
+	return t.AccessToken
+}
+
+// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form
+// that is submitted when acquiring an OAuth token.
+type ServicePrincipalSecret interface {
+	SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
+}
+
+// ServicePrincipalNoSecret represents a secret type that contains no secret,
+// meaning it is not valid for fetching a fresh token. This is used by manually
+// created tokens (see NewServicePrincipalTokenFromManualToken).
+type ServicePrincipalNoSecret struct {
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// It only returns an error for the ServicePrincipalNoSecret type.
+func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+	return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalNoSecret", + }) +} + +// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. +type ServicePrincipalTokenSecret struct { + ClientSecret string `json:"value"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using the client_secret. +func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("client_secret", tokenSecret.ClientSecret) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalTokenSecret", + Value: tokenSecret.ClientSecret, + }) +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} + token.Header["x5c"] = x5c + token.Claims = jwt.MapClaims{ + "aud": spt.inner.OauthConfig.TokenEndpoint.String(), + "iss": spt.inner.ClientID, + "sub": spt.inner.ClientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(24 * time.Hour).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") +} + +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. +type ServicePrincipalMSISecret struct { + msiType msiType + clientResourceID string +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. 
+func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") +} + +// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. +type ServicePrincipalUsernamePasswordSecret struct { + Username string `json:"username"` + Password string `json:"password"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("username", secret.Username) + v.Set("password", secret.Password) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Username string `json:"username"` + Password string `json:"password"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalUsernamePasswordSecret", + Username: secret.Username, + Password: secret.Password, + }) +} + +// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. +type ServicePrincipalAuthorizationCodeSecret struct { + ClientSecret string `json:"value"` + AuthorizationCode string `json:"authCode"` + RedirectURI string `json:"redirect"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("code", secret.AuthorizationCode) + v.Set("client_secret", secret.ClientSecret) + v.Set("redirect_uri", secret.RedirectURI) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + AuthCode string `json:"authCode"` + Redirect string `json:"redirect"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalAuthorizationCodeSecret", + Value: secret.ClientSecret, + AuthCode: secret.AuthorizationCode, + Redirect: secret.RedirectURI, + }) +} + +// ServicePrincipalFederatedSecret implements ServicePrincipalSecret for Federated JWTs. +type ServicePrincipalFederatedSecret struct { + jwtCallback JWTCallback +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during OAuth Token Acquisition using a JWT signed by an OIDC issuer. +func (secret *ServicePrincipalFederatedSecret) SetAuthenticationValues(_ *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.jwtCallback() + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalFederatedSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalFederatedSecret is not supported") +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. 
+type ServicePrincipalToken struct {
+	inner             servicePrincipalToken
+	refreshLock       *sync.RWMutex
+	sender            Sender
+	customRefreshFunc TokenRefresh
+	refreshCallbacks  []TokenRefreshCallback
+	// MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token.
+	// Setting this to a value less than 1 will use the default value.
+	MaxMSIRefreshAttempts int
+}
+
+// MarshalTokenJSON returns the marshalled inner token.
+func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) {
+	return json.Marshal(spt.inner.Token)
+}
+
+// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks.
+func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) {
+	spt.refreshCallbacks = callbacks
+}
+
+// SetCustomRefreshFunc sets a custom refresh function used to refresh the token.
+func (spt *ServicePrincipalToken) SetCustomRefreshFunc(customRefreshFunc TokenRefresh) {
+	spt.customRefreshFunc = customRefreshFunc
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) {
+	return json.Marshal(spt.inner)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
+	// need to determine the token type
+	raw := map[string]interface{}{}
+	err := json.Unmarshal(data, &raw)
+	if err != nil {
+		return err
+	}
+	// guard the type assertion so malformed input returns an error instead of panicking
+	secret, ok := raw["secret"].(map[string]interface{})
+	if !ok {
+		return errors.New("unmarshalling ServicePrincipalToken: the secret field is missing or not a JSON object")
+	}
+	switch secret["type"] {
+	case "ServicePrincipalNoSecret":
+		spt.inner.Secret = &ServicePrincipalNoSecret{}
+	case "ServicePrincipalTokenSecret":
+		spt.inner.Secret = &ServicePrincipalTokenSecret{}
+	case "ServicePrincipalCertificateSecret":
+		return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported")
+	case "ServicePrincipalMSISecret":
+		return errors.New("unmarshalling ServicePrincipalMSISecret is not supported")
+	case "ServicePrincipalUsernamePasswordSecret":
+		spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{}
+	case "ServicePrincipalAuthorizationCodeSecret":
+		spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{}
+	case "ServicePrincipalFederatedSecret":
+		return errors.New("unmarshalling ServicePrincipalFederatedSecret is not supported")
+	default:
+		return fmt.Errorf("unrecognized token type '%s'", secret["type"])
+	}
+	err = json.Unmarshal(data, &spt.inner)
+	if err != nil {
+		return err
+	}
+	// Don't override the refreshLock or the sender if those have been already set.
+	if spt.refreshLock == nil {
+		spt.refreshLock = &sync.RWMutex{}
+	}
+	if spt.sender == nil {
+		spt.sender = sender()
+	}
+	return nil
+}
+
+// internal type used for marshalling/unmarshalling
+type servicePrincipalToken struct {
+	Token         Token                  `json:"token"`
+	Secret        ServicePrincipalSecret `json:"secret"`
+	OauthConfig   OAuthConfig            `json:"oauth"`
+	ClientID      string                 `json:"clientID"`
+	Resource      string                 `json:"resource"`
+	AutoRefresh   bool                   `json:"autoRefresh"`
+	RefreshWithin time.Duration          `json:"refreshWithin"`
+}
+
+func validateOAuthConfig(oac OAuthConfig) error {
+	if oac.IsZero() {
+		return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized")
+	}
+	return nil
+}
+
+// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
+func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(id, "id"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: oauthConfig, + Secret: secret, + ClientID: id, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: sender(), + refreshCallbacks: callbacks, + } + return spt, nil +} + +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalNoSecret{}, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret +func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + secret, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. 
+func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(secret, "secret"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalTokenSecret{
+			ClientSecret: secret,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied certificate and RSA private key.
+func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if certificate == nil {
+		return nil, fmt.Errorf("parameter 'certificate' cannot be nil")
+	}
+	if privateKey == nil {
+		return nil, fmt.Errorf("parameter 'privateKey' cannot be nil")
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalCertificateSecret{
+			PrivateKey:  privateKey,
+			Certificate: certificate,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password.
+func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(username, "username"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(password, "password"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalUsernamePasswordSecret{
+			Username: username,
+			Password: password,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the supplied authorization code.
+func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientSecret, "clientSecret"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(redirectURI, "redirectURI"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalAuthorizationCodeSecret{
+			ClientSecret:      clientSecret,
+			AuthorizationCode: authorizationCode,
+			RedirectURI:       redirectURI,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromFederatedToken creates a ServicePrincipalToken from the supplied federated OIDC JWT.
+//
+// Deprecated: Use NewServicePrincipalTokenFromFederatedTokenCallback to refresh the JWT dynamically.
+func NewServicePrincipalTokenFromFederatedToken(oauthConfig OAuthConfig, clientID string, jwt string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if jwt == "" {
+		return nil, fmt.Errorf("parameter 'jwt' cannot be empty")
+	}
+	return NewServicePrincipalTokenFromFederatedTokenCallback(
+		oauthConfig,
+		clientID,
+		func() (string, error) {
+			return jwt, nil
+		},
+		resource,
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromFederatedTokenCallback creates a ServicePrincipalToken from the supplied federated OIDC JWTCallback.
+func NewServicePrincipalTokenFromFederatedTokenCallback(oauthConfig OAuthConfig, clientID string, jwtCallback JWTCallback, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if jwtCallback == nil {
+		return nil, fmt.Errorf("parameter 'jwtCallback' cannot be nil")
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalFederatedSecret{
+			jwtCallback: jwtCallback,
+		},
+		callbacks...,
+	)
+}
+
+type msiType int
+
+const (
+	msiTypeUnavailable msiType = iota
+	msiTypeAppServiceV20170901
+	msiTypeCloudShell
+	msiTypeIMDS
+)
+
+func (m msiType) String() string {
+	switch m {
+	case msiTypeAppServiceV20170901:
+		return "AppServiceV20170901"
+	case msiTypeCloudShell:
+		return "CloudShell"
+	case msiTypeIMDS:
+		return "IMDS"
+	default:
+		return fmt.Sprintf("unhandled MSI type %d", m)
+	}
+}
+
+// returns the MSI type and endpoint, or an error
+func getMSIType() (msiType, string, error) {
+	if endpointEnvVar := os.Getenv(msiEndpointEnv); endpointEnvVar != "" {
+		// if the env var MSI_ENDPOINT is set
+		if secretEnvVar := os.Getenv(msiSecretEnv); secretEnvVar != "" {
+			// if BOTH the env vars MSI_ENDPOINT and MSI_SECRET are set the msiType is AppService
+			return msiTypeAppServiceV20170901, endpointEnvVar, nil
+		}
+		// if ONLY the env var MSI_ENDPOINT is set the msiType is CloudShell
+		return msiTypeCloudShell, endpointEnvVar, nil
+	}
+	// if MSI_ENDPOINT is NOT set assume the msiType is IMDS
+	return msiTypeIMDS, msiEndpoint, nil
+}
+
+// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
+// NOTE: this always returns the IMDS endpoint, it does not work for app services or cloud shell.
+// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
+func GetMSIVMEndpoint() (string, error) {
+	return msiEndpoint, nil
+}
+
+// GetMSIAppServiceEndpoint gets the MSI endpoint for App Service and Functions.
+// It will return an error when not running in an App Service/Functions environment.
+// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
+func GetMSIAppServiceEndpoint() (string, error) {
+	msiType, endpoint, err := getMSIType()
+	if err != nil {
+		return "", err
+	}
+	switch msiType {
+	case msiTypeAppServiceV20170901:
+		return endpoint, nil
+	default:
+		return "", fmt.Errorf("%s is not an App Service environment", msiType)
+	}
+}
+
+// GetMSIEndpoint gets the appropriate MSI endpoint depending on the runtime environment.
+// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
+func GetMSIEndpoint() (string, error) {
+	_, endpoint, err := getMSIType()
+	return endpoint, err
+}
+
+// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the system assigned identity when creating the token.
+// msiEndpoint - empty string, or pass a non-empty string to override the default value.
+// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
+func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", "", callbacks...) +} + +// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the clientID of specified user assigned identity when creating the token. +// msiEndpoint - empty string, or pass a non-empty string to override the default value. +// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead. +func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateStringParam(userAssignedID, "userAssignedID"); err != nil { + return nil, err + } + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, "", callbacks...) +} + +// NewServicePrincipalTokenFromMSIWithIdentityResourceID creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the azure resource id of user assigned identity when creating the token. +// msiEndpoint - empty string, or pass a non-empty string to override the default value. +// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead. +func NewServicePrincipalTokenFromMSIWithIdentityResourceID(msiEndpoint, resource string, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateStringParam(identityResourceID, "identityResourceID"); err != nil { + return nil, err + } + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", identityResourceID, callbacks...) +} + +// ManagedIdentityOptions contains optional values for configuring managed identity authentication. +type ManagedIdentityOptions struct { + // ClientID is the user-assigned identity to use during authentication. + // It is mutually exclusive with IdentityResourceID. + ClientID string + + // IdentityResourceID is the resource ID of the user-assigned identity to use during authentication. + // It is mutually exclusive with ClientID. + IdentityResourceID string +} + +// NewServicePrincipalTokenFromManagedIdentity creates a ServicePrincipalToken using a managed identity. +// It supports the following managed identity environments. +// - App Service Environment (API version 2017-09-01 only) +// - Cloud shell +// - IMDS with a system or user assigned identity +func NewServicePrincipalTokenFromManagedIdentity(resource string, options *ManagedIdentityOptions, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if options == nil { + options = &ManagedIdentityOptions{} + } + return newServicePrincipalTokenFromMSI("", resource, options.ClientID, options.IdentityResourceID, callbacks...) 
+} + +func newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if userAssignedID != "" && identityResourceID != "" { + return nil, errors.New("cannot specify userAssignedID and identityResourceID") + } + msiType, endpoint, err := getMSIType() + if err != nil { + logger.Instance.Writef(logger.LogError, "Error determining managed identity environment: %v\n", err) + return nil, err + } + logger.Instance.Writef(logger.LogInfo, "Managed identity environment is %s, endpoint is %s\n", msiType, endpoint) + if msiEndpoint != "" { + endpoint = msiEndpoint + logger.Instance.Writef(logger.LogInfo, "Managed identity custom endpoint is %s\n", endpoint) + } + msiEndpointURL, err := url.Parse(endpoint) + if err != nil { + return nil, err + } + // cloud shell sends its data in the request body + if msiType != msiTypeCloudShell { + v := url.Values{} + v.Set("resource", resource) + clientIDParam := "client_id" + switch msiType { + case msiTypeAppServiceV20170901: + clientIDParam = "clientid" + v.Set("api-version", appServiceAPIVersion2017) + break + case msiTypeIMDS: + v.Set("api-version", msiAPIVersion) + } + if userAssignedID != "" { + v.Set(clientIDParam, userAssignedID) + } else if identityResourceID != "" { + v.Set("mi_res_id", identityResourceID) + } + msiEndpointURL.RawQuery = v.Encode() + } + + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: OAuthConfig{ + TokenEndpoint: *msiEndpointURL, + }, + Secret: &ServicePrincipalMSISecret{ + msiType: msiType, + clientResourceID: identityResourceID, + }, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + ClientID: userAssignedID, + }, + refreshLock: &sync.RWMutex{}, + sender: sender(), + refreshCallbacks: callbacks, + MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, + } + + return spt, nil +} + +// internal type that implements TokenRefreshError +type tokenRefreshError struct { + message string + resp *http.Response +} + +// Error implements the error interface which is part of the TokenRefreshError interface. +func (tre tokenRefreshError) Error() string { + return tre.message +} + +// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation. +func (tre tokenRefreshError) Response() *http.Response { + return tre.resp +} + +func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { + return tokenRefreshError{message: message, resp: resp} +} + +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (spt *ServicePrincipalToken) EnsureFresh() error { + return spt.EnsureFreshWithContext(context.Background()) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. 
+func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + // must take the read lock when initially checking the token's expiration + if spt.inner.AutoRefresh && spt.Token().WillExpireIn(spt.inner.RefreshWithin) { + // take the write lock then check again to see if the token was already refreshed + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { + return spt.refreshInternal(ctx, spt.inner.Resource) + } + } + return nil +} + +// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization +func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { + if spt.refreshCallbacks != nil { + for _, callback := range spt.refreshCallbacks { + err := callback(spt.inner.Token) + if err != nil { + return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) + } + } + } + return nil +} + +// Refresh obtains a fresh token for the Service Principal. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) Refresh() error { + return spt.RefreshWithContext(context.Background()) +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + return spt.refreshInternal(ctx, spt.inner.Resource) +} + +// RefreshExchange refreshes the token, but for a different resource. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { + return spt.RefreshExchangeWithContext(context.Background(), resource) +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + return spt.refreshInternal(ctx, resource) +} + +func (spt *ServicePrincipalToken) getGrantType() string { + switch spt.inner.Secret.(type) { + case *ServicePrincipalUsernamePasswordSecret: + return OAuthGrantTypeUserPass + case *ServicePrincipalAuthorizationCodeSecret: + return OAuthGrantTypeAuthorizationCode + default: + return OAuthGrantTypeClientCredentials + } +} + +func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error { + if spt.customRefreshFunc != nil { + token, err := spt.customRefreshFunc(ctx, resource) + if err != nil { + return err + } + spt.inner.Token = *token + return spt.InvokeRefreshCallbacks(spt.inner.Token) + } + req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil) + if err != nil { + return fmt.Errorf("adal: Failed to build the refresh request. 
Error = '%v'", err) + } + req.Header.Add("User-Agent", UserAgent()) + req = req.WithContext(ctx) + var resp *http.Response + authBodyFilter := func(b []byte) []byte { + if logger.Level() != logger.LogAuth { + return []byte("**REDACTED** authentication body") + } + return b + } + if msiSecret, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok { + switch msiSecret.msiType { + case msiTypeAppServiceV20170901: + req.Method = http.MethodGet + req.Header.Set("secret", os.Getenv(msiSecretEnv)) + break + case msiTypeCloudShell: + req.Header.Set("Metadata", "true") + data := url.Values{} + data.Set("resource", spt.inner.Resource) + if spt.inner.ClientID != "" { + data.Set("client_id", spt.inner.ClientID) + } else if msiSecret.clientResourceID != "" { + data.Set("msi_res_id", msiSecret.clientResourceID) + } + req.Body = io.NopCloser(strings.NewReader(data.Encode())) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + break + case msiTypeIMDS: + req.Method = http.MethodGet + req.Header.Set("Metadata", "true") + break + } + logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter}) + resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts) + } else { + v := url.Values{} + v.Set("client_id", spt.inner.ClientID) + v.Set("resource", resource) + + if spt.inner.Token.RefreshToken != "" { + v.Set("grant_type", OAuthGrantTypeRefreshToken) + v.Set("refresh_token", spt.inner.Token.RefreshToken) + // web apps must specify client_secret when refreshing tokens + // see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens + if spt.getGrantType() == OAuthGrantTypeAuthorizationCode { + err := spt.inner.Secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + } else { + v.Set("grant_type", spt.getGrantType()) + err := spt.inner.Secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + + s := v.Encode() + body := io.NopCloser(strings.NewReader(s)) + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + req.Body = body + logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter}) + resp, err = spt.sender.Do(req) + } + + // don't return a TokenRefreshError here; this will allow retry logic to apply + if err != nil { + return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) + } else if resp == nil { + return fmt.Errorf("adal: received nil response and error") + } + + logger.Instance.WriteResponse(resp, logger.Filter{Body: authBodyFilter}) + defer resp.Body.Close() + rb, err := io.ReadAll(resp.Body) + + if resp.StatusCode != http.StatusOK { + if err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v Endpoint %s", resp.StatusCode, err, req.URL.String()), resp) + } + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s Endpoint %s", resp.StatusCode, string(rb), req.URL.String()), resp) + } + + // for the following error cases don't return a TokenRefreshError. the operation succeeded + // but some transient failure happened during deserialization. by returning a generic error + // the retry logic will kick in (we don't retry on TokenRefreshError). + + if err != nil { + return fmt.Errorf("adal: Failed to read a new service principal token during refresh. 
Error = '%v'", err) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return fmt.Errorf("adal: Empty service principal token received during refresh") + } + token := struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + + // AAD returns expires_in as a string, ADFS returns it as an int + ExpiresIn json.Number `json:"expires_in"` + // expires_on can be in three formats, a UTC time stamp, or the number of seconds as a string *or* int. + ExpiresOn interface{} `json:"expires_on"` + NotBefore json.Number `json:"not_before"` + + Resource string `json:"resource"` + Type string `json:"token_type"` + }{} + // return a TokenRefreshError in the follow error cases as the token is in an unexpected format + err = json.Unmarshal(rb, &token) + if err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)), resp) + } + expiresOn := json.Number("") + // ADFS doesn't include the expires_on field + if token.ExpiresOn != nil { + if expiresOn, err = parseExpiresOn(token.ExpiresOn); err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: failed to parse expires_on: %v value '%s'", err, token.ExpiresOn), resp) + } + } + spt.inner.Token.AccessToken = token.AccessToken + spt.inner.Token.RefreshToken = token.RefreshToken + spt.inner.Token.ExpiresIn = token.ExpiresIn + spt.inner.Token.ExpiresOn = expiresOn + spt.inner.Token.NotBefore = token.NotBefore + spt.inner.Token.Resource = token.Resource + spt.inner.Token.Type = token.Type + + return spt.InvokeRefreshCallbacks(spt.inner.Token) +} + +// converts expires_on to the number of seconds +func parseExpiresOn(s interface{}) (json.Number, error) { + // the JSON unmarshaler treats JSON numbers unmarshaled into an interface{} as float64 + asFloat64, ok := s.(float64) + if ok { + // this is the number of seconds as int case + return json.Number(strconv.FormatInt(int64(asFloat64), 10)), nil + } + asStr, ok := s.(string) + if !ok { + return "", fmt.Errorf("unexpected expires_on type %T", s) + } + // convert the expiration date to the number of seconds from the unix epoch + timeToDuration := func(t time.Time) json.Number { + return json.Number(strconv.FormatInt(t.UTC().Unix(), 10)) + } + if _, err := json.Number(asStr).Int64(); err == nil { + // this is the number of seconds case, no conversion required + return json.Number(asStr), nil + } else if eo, err := time.Parse(expiresOnDateFormatPM, asStr); err == nil { + return timeToDuration(eo), nil + } else if eo, err := time.Parse(expiresOnDateFormat, asStr); err == nil { + return timeToDuration(eo), nil + } else { + // unknown format + return json.Number(""), err + } +} + +// retry logic specific to retrieving a token from the IMDS endpoint +func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { + // copied from client.go due to circular dependency + retries := []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + // extra retry status codes specific to IMDS + retries = append(retries, + http.StatusNotFound, + http.StatusGone, + // all remaining 5xx + http.StatusNotImplemented, + http.StatusHTTPVersionNotSupported, + http.StatusVariantAlsoNegotiates, + http.StatusInsufficientStorage, + http.StatusLoopDetected, + http.StatusNotExtended, + 
http.StatusNetworkAuthenticationRequired) + + // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance + + const maxDelay time.Duration = 60 * time.Second + + attempt := 0 + delay := time.Duration(0) + + // maxAttempts is user-specified, ensure that its value is greater than zero else no request will be made + if maxAttempts < 1 { + maxAttempts = defaultMaxMSIRefreshAttempts + } + + for attempt < maxAttempts { + if resp != nil && resp.Body != nil { + io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } + resp, err = sender.Do(req) + // we want to retry if err is not nil or the status code is in the list of retry codes + if err == nil && !responseHasStatusCode(resp, retries...) { + return + } + + // perform exponential backoff with a cap. + // must increment attempt before calculating delay. + attempt++ + // the base value of 2 is the "delta backoff" as specified in the guidance doc + delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) + if delay > maxDelay { + delay = maxDelay + } + + select { + case <-time.After(delay): + // intentionally left blank + case <-req.Context().Done(): + err = req.Context().Err() + return + } + } + return +} + +func responseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp != nil { + for _, i := range codes { + if i == resp.StatusCode { + return true + } + } + } + return false +} + +// SetAutoRefresh enables or disables automatic refreshing of stale tokens. +func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { + spt.inner.AutoRefresh = autoRefresh +} + +// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will +// refresh the token. +func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { + spt.inner.RefreshWithin = d + return +} + +// SetSender sets the http.Client used when obtaining the Service Principal token. An +// undecorated http.Client is used by default. +func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } + +// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token. +func (spt *ServicePrincipalToken) OAuthToken() string { + spt.refreshLock.RLock() + defer spt.refreshLock.RUnlock() + return spt.inner.Token.OAuthToken() +} + +// Token returns a copy of the current token. +func (spt *ServicePrincipalToken) Token() Token { + spt.refreshLock.RLock() + defer spt.refreshLock.RUnlock() + return spt.inner.Token +} + +// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization. +type MultiTenantServicePrincipalToken struct { + PrimaryToken *ServicePrincipalToken + AuxiliaryTokens []*ServicePrincipalToken +} + +// PrimaryOAuthToken returns the primary authorization token. +func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string { + return mt.PrimaryToken.OAuthToken() +} + +// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens. +func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string { + tokens := make([]string, len(mt.AuxiliaryTokens)) + for i := range mt.AuxiliaryTokens { + tokens[i] = mt.AuxiliaryTokens[i].OAuthToken() + } + return tokens +} + +// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource. 
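+//
+// Editor's note: a hedged usage sketch, not upstream code; the endpoint, tenant
+// IDs, credentials, and resource below are placeholders:
+//
+//	cfg, _ := adal.NewMultiTenantOAuthConfig(
+//		"https://login.microsoftonline.com/", "primary-tenant-id",
+//		[]string{"aux-tenant-id"}, adal.OAuthOptions{})
+//	mt, err := adal.NewMultiTenantServicePrincipalToken(
+//		cfg, "client-id", "client-secret", "https://management.azure.com/")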
+func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} + +// NewMultiTenantServicePrincipalTokenFromCertificate creates a new MultiTenantServicePrincipalToken with the specified certificate credentials and resource. +func NewMultiTenantServicePrincipalTokenFromCertificate(multiTenantCfg MultiTenantOAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if certificate == nil { + return nil, fmt.Errorf("parameter 'certificate' cannot be nil") + } + if privateKey == nil { + return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalTokenWithSecret( + *multiTenantCfg.PrimaryTenant(), + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalTokenWithSecret( + *auxTenants[i], + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} + +// MSIAvailable returns true if the MSI endpoint is available for authentication. 
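+//
+// Editor's note: a minimal, hypothetical probe; passing a nil Sender makes the
+// function fall back to its default sender:
+//
+//	if adal.MSIAvailable(ctx, nil) {
+//		// a managed identity endpoint is reachable; MSI tokens can be requested
+//	}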
+func MSIAvailable(ctx context.Context, s Sender) bool { + msiType, _, err := getMSIType() + + if err != nil { + return false + } + + if msiType != msiTypeIMDS { + return true + } + + if s == nil { + s = sender() + } + + resp, err := getMSIEndpoint(ctx, s) + + if err == nil { + resp.Body.Close() + } + + return err == nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go new file mode 100644 index 000000000..89190a421 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go @@ -0,0 +1,76 @@ +//go:build go1.13 +// +build go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adal + +import ( + "context" + "fmt" + "net/http" + "time" +) + +func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { + tempCtx, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + // http.NewRequestWithContext() was added in Go 1.13 + req, _ := http.NewRequestWithContext(tempCtx, http.MethodGet, msiEndpoint, nil) + q := req.URL.Query() + q.Add("api-version", msiAPIVersion) + req.URL.RawQuery = q.Encode() + return sender.Do(req) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. 
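+//
+// Editor's note: a hedged sketch; the Key Vault resource URI is illustrative only:
+//
+//	if err := mt.RefreshExchangeWithContext(ctx, "https://vault.azure.net"); err != nil {
+//		// one or more tenants' tokens could not be exchanged for the new resource
+//	}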
+func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go new file mode 100644 index 000000000..27ec4efad --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go @@ -0,0 +1,75 @@ +//go:build !go1.13 +// +build !go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adal + +import ( + "context" + "net/http" + "time" +) + +func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { + tempCtx, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + req, _ := http.NewRequest(http.MethodGet, msiEndpoint, nil) + req = req.WithContext(tempCtx) + q := req.URL.Query() + q.Add("api-version", msiAPIVersion) + req.URL.RawQuery = q.Encode() + return sender.Do(req) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.EnsureFreshWithContext(ctx); err != nil { + return err + } + } + return nil +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshWithContext(ctx); err != nil { + return err + } + } + return nil +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. 
+func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go new file mode 100644 index 000000000..c867b3484 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go @@ -0,0 +1,45 @@ +package adal + +import ( + "fmt" + "runtime" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const number = "v1.0.0" + +var ( + ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. +func UserAgent() string { + return ua +} + +// AddToUserAgent adds an extension to the current user agent +func AddToUserAgent(extension string) error { + if extension != "" { + ua = fmt.Sprintf("%s %s", ua, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go new file mode 100644 index 000000000..1226c4111 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -0,0 +1,353 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "crypto/tls" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/Azure/go-autorest/autorest/adal" +) + +const ( + bearerChallengeHeader = "Www-Authenticate" + bearer = "Bearer" + tenantID = "tenantID" + apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key" + bingAPISdkHeader = "X-BingApis-SDK-Client" + golangBingAPISdkHeaderValue = "Go-SDK" + authorization = "Authorization" + basic = "Basic" +) + +// Authorizer is the interface that provides a PrepareDecorator used to supply request +// authorization. Most often, the Authorizer decorator runs last so it has access to the full +// state of the formed HTTP request. 
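+//
+// Editor's note: a hypothetical illustration, not upstream code; any type whose
+// WithAuthorization returns a PrepareDecorator satisfies this interface, e.g. a
+// trivial static-token authorizer:
+//
+//	type staticTokenAuthorizer struct{ token string }
+//
+//	func (sta staticTokenAuthorizer) WithAuthorization() PrepareDecorator {
+//		return WithHeader("Authorization", "Bearer "+sta.token)
+//	}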
+type Authorizer interface {
+	WithAuthorization() PrepareDecorator
+}
+
+// NullAuthorizer implements a default, "do nothing" Authorizer.
+type NullAuthorizer struct{}
+
+// WithAuthorization returns a PrepareDecorator that does nothing.
+func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
+	return WithNothing()
+}
+
+// APIKeyAuthorizer implements API Key authorization.
+type APIKeyAuthorizer struct {
+	headers         map[string]interface{}
+	queryParameters map[string]interface{}
+}
+
+// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers.
+func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
+	return NewAPIKeyAuthorizer(headers, nil)
+}
+
+// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters.
+func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
+	return NewAPIKeyAuthorizer(nil, queryParameters)
+}
+
+// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with headers and query parameters.
+func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
+	return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds HTTP headers and query parameters.
+func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
+	}
+}
+
+// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
+type CognitiveServicesAuthorizer struct {
+	subscriptionKey string
+}
+
+// NewCognitiveServicesAuthorizer creates a CognitiveServicesAuthorizer using the given subscription key.
+func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
+	return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the Cognitive Services
+// subscription-key and Bing API SDK headers.
+func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := make(map[string]interface{})
+	headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
+	headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
+
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// BearerAuthorizer implements the bearer authorization.
+type BearerAuthorizer struct {
+	tokenProvider adal.OAuthTokenProvider
+}
+
+// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider.
+func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
+	return &BearerAuthorizer{tokenProvider: tp}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Bearer " followed by the token.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
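+//
+// Editor's note: a hedged usage sketch; spt stands in for a previously
+// constructed *adal.ServicePrincipalToken:
+//
+//	authorizer := NewBearerAuthorizer(spt)
+//	req, err := Prepare(&http.Request{}, authorizer.WithAuthorization())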
+func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // the ordering is important here, prefer RefresherWithContext if available + if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok { + err = refresher.EnsureFreshWithContext(r.Context()) + } else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok { + err = refresher.EnsureFresh() + } + if err != nil { + var resp *http.Response + if tokError, ok := err.(adal.TokenRefreshError); ok { + resp = tokError.Response() + } + return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp, + "Failed to refresh the Token for request to %s", r.URL) + } + return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))) + } + return r, err + }) + } +} + +// TokenProvider returns OAuthTokenProvider so that it can be used for authorization outside the REST. +func (ba *BearerAuthorizer) TokenProvider() adal.OAuthTokenProvider { + return ba.tokenProvider +} + +// BearerAuthorizerCallbackFunc is the authentication callback signature. +type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) + +// BearerAuthorizerCallback implements bearer authorization via a callback. +type BearerAuthorizerCallback struct { + sender Sender + callback BearerAuthorizerCallbackFunc +} + +// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback +// is invoked when the HTTP request is submitted. +func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { + if s == nil { + s = sender(tls.RenegotiateNever) + } + return &BearerAuthorizerCallback{sender: s, callback: callback} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value +// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // make a copy of the request and remove the body as it's not + // required and avoids us having to create a copy of it. 
+			rCopy := *r
+			removeRequestBody(&rCopy)
+
+			resp, err := bacb.sender.Do(&rCopy)
+			if err != nil {
+				return r, err
+			}
+			DrainResponseBody(resp)
+			if resp.StatusCode == 401 && hasBearerChallenge(resp.Header) {
+				bc, err := newBearerChallenge(resp.Header)
+				if err != nil {
+					return r, err
+				}
+				if bacb.callback != nil {
+					ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
+					if err != nil {
+						return r, err
+					}
+					return Prepare(r, ba.WithAuthorization())
+				}
+			}
+		}
+		return r, err
+	})
+	}
+}
+
+// returns true if the HTTP response contains a bearer challenge
+func hasBearerChallenge(header http.Header) bool {
+	authHeader := header.Get(bearerChallengeHeader)
+	if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 {
+		return false
+	}
+	return true
+}
+
+type bearerChallenge struct {
+	values map[string]string
+}
+
+func newBearerChallenge(header http.Header) (bc bearerChallenge, err error) {
+	challenge := strings.TrimSpace(header.Get(bearerChallengeHeader))
+	trimmedChallenge := challenge[len(bearer)+1:]
+
+	// challenge is a set of key=value pairs that are comma delimited
+	pairs := strings.Split(trimmedChallenge, ",")
+	if len(pairs) < 1 {
+		err = fmt.Errorf("challenge '%s' contains no pairs", challenge)
+		return bc, err
+	}
+
+	bc.values = make(map[string]string)
+	for i := range pairs {
+		trimmedPair := strings.TrimSpace(pairs[i])
+		pair := strings.Split(trimmedPair, "=")
+		if len(pair) == 2 {
+			// remove the enclosing quotes
+			key := strings.Trim(pair[0], "\"")
+			value := strings.Trim(pair[1], "\"")
+
+			switch key {
+			case "authorization", "authorization_uri":
+				// strip the tenant ID from the authorization URL
+				asURL, err := url.Parse(value)
+				if err != nil {
+					return bc, err
+				}
+				bc.values[tenantID] = asURL.Path[1:]
+			default:
+				bc.values[key] = value
+			}
+		}
+	}
+
+	return bc, err
+}
+
+// EventGridKeyAuthorizer implements authorization for event grid using key authentication.
+type EventGridKeyAuthorizer struct {
+	topicKey string
+}
+
+// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer
+// with the specified topic key.
+func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer {
+	return EventGridKeyAuthorizer{topicKey: topicKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header.
+func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := map[string]interface{}{
+		"aeg-sas-key": egta.topicKey,
+	}
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header
+// with the value "Basic <TOKEN>" where <TOKEN> is a base64-encoded username:password tuple.
+type BasicAuthorizer struct {
+	userName string
+	password string
+}
+
+// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password.
+func NewBasicAuthorizer(userName, password string) *BasicAuthorizer {
+	return &BasicAuthorizer{
+		userName: userName,
+		password: password,
+	}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Basic " followed by the base64-encoded username:password tuple.
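+//
+// Editor's note: a hypothetical example; the credentials are placeholders:
+//
+//	ba := NewBasicAuthorizer("user", "password")
+//	req, err := Prepare(&http.Request{}, ba.WithAuthorization())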
+func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := make(map[string]interface{})
+	headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password)))
+
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants.
+type MultiTenantServicePrincipalTokenAuthorizer interface {
+	WithAuthorization() PrepareDecorator
+}
+
+// NewMultiTenantServicePrincipalTokenAuthorizer creates a BearerAuthorizer using the given token provider.
+func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer {
+	return NewMultiTenantBearerAuthorizer(tp)
+}
+
+// MultiTenantBearerAuthorizer implements bearer authorization across multiple tenants.
+type MultiTenantBearerAuthorizer struct {
+	tp adal.MultitenantOAuthTokenProvider
+}
+
+// NewMultiTenantBearerAuthorizer creates a MultiTenantBearerAuthorizer using the given token provider.
+func NewMultiTenantBearerAuthorizer(tp adal.MultitenantOAuthTokenProvider) *MultiTenantBearerAuthorizer {
+	return &MultiTenantBearerAuthorizer{tp: tp}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the
+// primary token along with the auxiliary authorization header using the auxiliary tokens.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
+func (mt *MultiTenantBearerAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err != nil {
+				return r, err
+			}
+			if refresher, ok := mt.tp.(adal.RefresherWithContext); ok {
+				err = refresher.EnsureFreshWithContext(r.Context())
+				if err != nil {
+					var resp *http.Response
+					if tokError, ok := err.(adal.TokenRefreshError); ok {
+						resp = tokError.Response()
+					}
+					return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp,
+						"Failed to refresh one or more Tokens for request to %s", r.URL)
+				}
+			}
+			r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken())))
+			if err != nil {
+				return r, err
+			}
+			auxTokens := mt.tp.AuxiliaryOAuthTokens()
+			for i := range auxTokens {
+				auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i])
+			}
+			return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, ", ")))
+		})
+	}
+}
+
+// TokenProvider returns the underlying MultitenantOAuthTokenProvider for this authorizer.
+func (mt *MultiTenantBearerAuthorizer) TokenProvider() adal.MultitenantOAuthTokenProvider {
+	return mt.tp
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go
new file mode 100644
index 000000000..66501493b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go
@@ -0,0 +1,66 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "net/http" + "strings" +) + +// SASTokenAuthorizer implements an authorization for SAS Token Authentication +// this can be used for interaction with Blob Storage Endpoints +type SASTokenAuthorizer struct { + sasToken string +} + +// NewSASTokenAuthorizer creates a SASTokenAuthorizer using the given credentials +func NewSASTokenAuthorizer(sasToken string) (*SASTokenAuthorizer, error) { + if strings.TrimSpace(sasToken) == "" { + return nil, fmt.Errorf("sasToken cannot be empty") + } + + token := sasToken + if strings.HasPrefix(sasToken, "?") { + token = strings.TrimPrefix(sasToken, "?") + } + + return &SASTokenAuthorizer{ + sasToken: token, + }, nil +} + +// WithAuthorization returns a PrepareDecorator that adds a shared access signature token to the +// URI's query parameters. This can be used for the Blob, Queue, and File Services. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/delegate-access-with-shared-access-signature +func (sas *SASTokenAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + + if r.URL.RawQuery == "" { + r.URL.RawQuery = sas.sasToken + } else if !strings.Contains(r.URL.RawQuery, sas.sasToken) { + r.URL.RawQuery = fmt.Sprintf("%s&%s", r.URL.RawQuery, sas.sasToken) + } + + return Prepare(r) + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go new file mode 100644 index 000000000..2af5030a1 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go @@ -0,0 +1,307 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "sort" + "strings" + "time" +) + +// SharedKeyType defines the enumeration for the various shared key types. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key for details on the shared key types. +type SharedKeyType string + +const ( + // SharedKey is used to authorize against blobs, files and queues services. + SharedKey SharedKeyType = "sharedKey" + + // SharedKeyForTable is used to authorize against the table service. + SharedKeyForTable SharedKeyType = "sharedKeyTable" + + // SharedKeyLite is used to authorize against blobs, files and queues services. 
It's provided for
+	// backwards compatibility with API versions before 2009-09-19. Prefer SharedKey instead.
+	SharedKeyLite SharedKeyType = "sharedKeyLite"
+
+	// SharedKeyLiteForTable is used to authorize against the table service. It's provided for
+	// backwards compatibility with older table API versions. Prefer SharedKeyForTable instead.
+	SharedKeyLiteForTable SharedKeyType = "sharedKeyLiteTable"
+)
+
+const (
+	headerAccept            = "Accept"
+	headerAcceptCharset     = "Accept-Charset"
+	headerContentEncoding   = "Content-Encoding"
+	headerContentLength     = "Content-Length"
+	headerContentMD5        = "Content-MD5"
+	headerContentLanguage   = "Content-Language"
+	headerIfModifiedSince   = "If-Modified-Since"
+	headerIfMatch           = "If-Match"
+	headerIfNoneMatch       = "If-None-Match"
+	headerIfUnmodifiedSince = "If-Unmodified-Since"
+	headerDate              = "Date"
+	headerXMSDate           = "X-Ms-Date"
+	headerXMSVersion        = "x-ms-version"
+	headerRange             = "Range"
+)
+
+const storageEmulatorAccountName = "devstoreaccount1"
+
+// SharedKeyAuthorizer implements an authorization for Shared Key
+// this can be used for interaction with Blob, File and Queue Storage Endpoints
+type SharedKeyAuthorizer struct {
+	accountName string
+	accountKey  []byte
+	keyType     SharedKeyType
+}
+
+// NewSharedKeyAuthorizer creates a SharedKeyAuthorizer using the provided credentials and shared key type.
+func NewSharedKeyAuthorizer(accountName, accountKey string, keyType SharedKeyType) (*SharedKeyAuthorizer, error) {
+	key, err := base64.StdEncoding.DecodeString(accountKey)
+	if err != nil {
+		return nil, fmt.Errorf("malformed storage account key: %v", err)
+	}
+	return &SharedKeyAuthorizer{
+		accountName: accountName,
+		accountKey:  key,
+		keyType:     keyType,
+	}, nil
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "<SharedKeyType> <AccountName>:" followed by the computed key.
+// This can be used for the Blob, Queue, and File Services
+//
+// from: https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key
+// You may use Shared Key authorization to authorize a request made against the
+// 2009-09-19 version and later of the Blob and Queue services,
+// and version 2014-02-14 and later of the File services.
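+//
+// Editor's note: a hedged sketch; the account name and base64 key are placeholders:
+//
+//	ska, err := NewSharedKeyAuthorizer("myaccount", "bXlrZXk=", SharedKey)
+//	if err == nil {
+//		req, err = Prepare(req, ska.WithAuthorization())
+//	}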
+func (sk *SharedKeyAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + + sk, err := buildSharedKey(sk.accountName, sk.accountKey, r, sk.keyType) + if err != nil { + return r, err + } + return Prepare(r, WithHeader(headerAuthorization, sk)) + }) + } +} + +func buildSharedKey(accName string, accKey []byte, req *http.Request, keyType SharedKeyType) (string, error) { + canRes, err := buildCanonicalizedResource(accName, req.URL.String(), keyType) + if err != nil { + return "", err + } + + if req.Header == nil { + req.Header = http.Header{} + } + + // ensure date is set + if req.Header.Get(headerDate) == "" && req.Header.Get(headerXMSDate) == "" { + date := time.Now().UTC().Format(http.TimeFormat) + req.Header.Set(headerXMSDate, date) + } + canString, err := buildCanonicalizedString(req.Method, req.Header, canRes, keyType) + if err != nil { + return "", err + } + return createAuthorizationHeader(accName, accKey, canString, keyType), nil +} + +func buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType) (string, error) { + errMsg := "buildCanonicalizedResource error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := bytes.NewBufferString("") + if accountName != storageEmulatorAccountName { + cr.WriteString("/") + cr.WriteString(getCanonicalizedAccountName(accountName)) + } + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. + // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } else { + // a slash is required to indicate the root path + cr.WriteString("/") + } + + params, err := url.ParseQuery(u.RawQuery) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 + if keyType == SharedKey { + if len(params) > 0 { + cr.WriteString("\n") + + keys := []string{} + for key := range params { + keys = append(keys, key) + } + sort.Strings(keys) + + completeParams := []string{} + for _, key := range keys { + if len(params[key]) > 1 { + sort.Strings(params[key]) + } + + completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) + } + cr.WriteString(strings.Join(completeParams, "\n")) + } + } else { + // search for "comp" parameter, if exists then add it to canonicalizedresource + if v, ok := params["comp"]; ok { + cr.WriteString("?comp=" + v[0]) + } + } + + return string(cr.Bytes()), nil +} + +func getCanonicalizedAccountName(accountName string) string { + // since we may be trying to access a secondary storage account, we need to + // remove the -secondary part of the storage name + return strings.TrimSuffix(accountName, "-secondary") +} + +func buildCanonicalizedString(verb string, headers http.Header, canonicalizedResource string, keyType SharedKeyType) (string, error) { + contentLength := headers.Get(headerContentLength) + if contentLength == "0" { + contentLength = "" + } + date := headers.Get(headerDate) + if v := headers.Get(headerXMSDate); v != "" { + if keyType == SharedKey || keyType == SharedKeyLite { + date = "" + } else { + date = v + } + } + var canString string + switch keyType { + case SharedKey: + canString = 
strings.Join([]string{ + verb, + headers.Get(headerContentEncoding), + headers.Get(headerContentLanguage), + contentLength, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + headers.Get(headerIfModifiedSince), + headers.Get(headerIfMatch), + headers.Get(headerIfNoneMatch), + headers.Get(headerIfUnmodifiedSince), + headers.Get(headerRange), + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case SharedKeyForTable: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + canonicalizedResource, + }, "\n") + case SharedKeyLite: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case SharedKeyLiteForTable: + canString = strings.Join([]string{ + date, + canonicalizedResource, + }, "\n") + default: + return "", fmt.Errorf("key type '%s' is not supported", keyType) + } + return canString, nil +} + +func buildCanonicalizedHeader(headers http.Header) string { + cm := make(map[string]string) + + for k := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = headers.Get(k) + } + } + + if len(cm) == 0 { + return "" + } + + keys := []string{} + for key := range cm { + keys = append(keys, key) + } + + sort.Strings(keys) + + ch := bytes.NewBufferString("") + + for _, key := range keys { + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(cm[key]) + ch.WriteRune('\n') + } + + return strings.TrimSuffix(string(ch.Bytes()), "\n") +} + +func createAuthorizationHeader(accountName string, accountKey []byte, canonicalizedString string, keyType SharedKeyType) string { + h := hmac.New(sha256.New, accountKey) + h.Write([]byte(canonicalizedString)) + signature := base64.StdEncoding.EncodeToString(h.Sum(nil)) + var key string + switch keyType { + case SharedKey, SharedKeyForTable: + key = "SharedKey" + case SharedKeyLite, SharedKeyLiteForTable: + key = "SharedKeyLite" + } + return fmt.Sprintf("%s %s:%s", key, getCanonicalizedAccountName(accountName), signature) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go new file mode 100644 index 000000000..211c98d1e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -0,0 +1,150 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. 
For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. +*/ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" + "time" +) + +const ( + // HeaderLocation specifies the HTTP Location header. + HeaderLocation = "Location" + + // HeaderRetryAfter specifies the HTTP Retry-After header. + HeaderRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. +func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp == nil { + return false + } + return containsInt(codes, resp.StatusCode) +} + +// GetLocation retrieves the URL from the Location header of the passed response. +func GetLocation(resp *http.Response) string { + return resp.Header.Get(HeaderLocation) +} + +// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. +func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. 
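+//
+// Editor's note: a minimal sketch; resp is assumed to be a response whose
+// Location header points at the polling URL, and the nil cancel channel means
+// the request is never cancelled:
+//
+//	pollReq, err := NewPollingRequest(resp, nil)
+//	// pollReq is a GET against the URL from resp's Location header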
+func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} + +// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. +func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare((&http.Request{}).WithContext(ctx), + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go new file mode 100644 index 000000000..45575eedb --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -0,0 +1,995 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/logger" + "github.com/Azure/go-autorest/tracing" +) + +const ( + headerAsyncOperation = "Azure-AsyncOperation" +) + +const ( + operationInProgress string = "InProgress" + operationCanceled string = "Canceled" + operationFailed string = "Failed" + operationSucceeded string = "Succeeded" +) + +var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} + +// FutureAPI contains the set of methods on the Future type. +type FutureAPI interface { + // Response returns the last HTTP response. + Response() *http.Response + + // Status returns the last status message of the operation. + Status() string + + // PollingMethod returns the method used to monitor the status of the asynchronous operation. + PollingMethod() PollingMethodType + + // DoneWithContext queries the service to see if the operation has completed. + DoneWithContext(context.Context, autorest.Sender) (bool, error) + + // GetPollingDelay returns a duration the application should wait before checking + // the status of the asynchronous request and true; this value is returned from + // the service via the Retry-After response header. 
If the header wasn't returned
+	// then the function returns the zero-value time.Duration and false.
+	GetPollingDelay() (time.Duration, bool)
+
+	// WaitForCompletionRef will return when one of the following conditions is met: the long
+	// running operation has completed, the provided context is cancelled, or the client's
+	// polling duration has been exceeded. It will retry failed polling attempts based on
+	// the retry value defined in the client up to the maximum retry attempts.
+	// If no deadline is specified in the context then the client.PollingDuration will be
+	// used to determine if a default deadline should be used.
+	// If PollingDuration is greater than zero the value will be used as the context's timeout.
+	// If PollingDuration is zero then no default deadline will be used.
+	WaitForCompletionRef(context.Context, autorest.Client) error
+
+	// MarshalJSON implements the json.Marshaler interface.
+	MarshalJSON() ([]byte, error)
+
+	// UnmarshalJSON implements the json.Unmarshaler interface.
+	UnmarshalJSON([]byte) error
+
+	// PollingURL returns the URL used for retrieving the status of the long-running operation.
+	PollingURL() string
+
+	// GetResult should be called once polling has completed successfully.
+	// It makes the final GET call to retrieve the resultant payload.
+	GetResult(autorest.Sender) (*http.Response, error)
+}
+
+var _ FutureAPI = (*Future)(nil)
+
+// Future provides a mechanism to access the status and results of an asynchronous request.
+// Since futures are stateful they should be passed by value to avoid race conditions.
+type Future struct {
+	pt pollingTracker
+}
+
+// NewFutureFromResponse returns a new Future object initialized
+// with the initial response from an asynchronous operation.
+func NewFutureFromResponse(resp *http.Response) (Future, error) {
+	pt, err := createPollingTracker(resp)
+	return Future{pt: pt}, err
+}
+
+// Response returns the last HTTP response.
+func (f Future) Response() *http.Response {
+	if f.pt == nil {
+		return nil
+	}
+	return f.pt.latestResponse()
+}
+
+// Status returns the last status message of the operation.
+func (f Future) Status() string {
+	if f.pt == nil {
+		return ""
+	}
+	return f.pt.pollingStatus()
+}
+
+// PollingMethod returns the method used to monitor the status of the asynchronous operation.
+func (f Future) PollingMethod() PollingMethodType {
+	if f.pt == nil {
+		return PollingUnknown
+	}
+	return f.pt.pollingMethod()
+}
+
+// DoneWithContext queries the service to see if the operation has completed.
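+//
+// Editor's note: a hedged polling-loop sketch (sender is any autorest.Sender):
+//
+//	for {
+//		done, err := f.DoneWithContext(ctx, sender)
+//		if err != nil || done {
+//			break
+//		}
+//		if d, ok := f.GetPollingDelay(); ok {
+//			time.Sleep(d)
+//		}
+//	}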
+func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + + if f.pt == nil { + return false, autorest.NewError("Future", "Done", "future is not initialized") + } + if f.pt.hasTerminated() { + return true, f.pt.pollingError() + } + if err := f.pt.pollForStatus(ctx, sender); err != nil { + return false, err + } + if err := f.pt.checkForErrors(); err != nil { + return f.pt.hasTerminated(), err + } + if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { + return false, err + } + if err := f.pt.initPollingMethod(); err != nil { + return false, err + } + if err := f.pt.updatePollingMethod(); err != nil { + return false, err + } + return f.pt.hasTerminated(), f.pt.pollingError() +} + +// GetPollingDelay returns a duration the application should wait before checking +// the status of the asynchronous request and true; this value is returned from +// the service via the Retry-After response header. If the header wasn't returned +// then the function returns the zero-value time.Duration and false. +func (f Future) GetPollingDelay() (time.Duration, bool) { + if f.pt == nil { + return 0, false + } + resp := f.pt.latestResponse() + if resp == nil { + return 0, false + } + + retry := resp.Header.Get(autorest.HeaderRetryAfter) + if retry == "" { + return 0, false + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + panic(err) + } + + return d, true +} + +// WaitForCompletionRef will return when one of the following conditions is met: the long +// running operation has completed, the provided context is cancelled, or the client's +// polling duration has been exceeded. It will retry failed polling attempts based on +// the retry value defined in the client up to the maximum retry attempts. +// If no deadline is specified in the context then the client.PollingDuration will be +// used to determine if a default deadline should be used. +// If PollingDuration is greater than zero the value will be used as the context's timeout. +// If PollingDuration is zero then no default deadline will be used. 
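+//
+// Editor's note: a hypothetical end-to-end sketch; client is an autorest.Client
+// whose retry and polling settings have already been configured:
+//
+//	future, err := NewFutureFromResponse(resp)
+//	if err == nil {
+//		err = future.WaitForCompletionRef(ctx, client)
+//	}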
+func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + cancelCtx := ctx + // if the provided context already has a deadline don't override it + _, hasDeadline := ctx.Deadline() + if d := client.PollingDuration; !hasDeadline && d != 0 { + var cancel context.CancelFunc + cancelCtx, cancel = context.WithTimeout(ctx, d) + defer cancel() + } + // if the initial response has a Retry-After, sleep for the specified amount of time before starting to poll + if delay, ok := f.GetPollingDelay(); ok { + logger.Instance.Writeln(logger.LogInfo, "WaitForCompletionRef: initial polling delay") + if delayElapsed := autorest.DelayForBackoff(delay, 0, cancelCtx.Done()); !delayElapsed { + err = cancelCtx.Err() + return + } + } + done, err := f.DoneWithContext(ctx, client) + for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) { + if attempts >= client.RetryAttempts { + return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded") + } + // we want delayAttempt to be zero in the non-error case so + // that DelayForBackoff doesn't perform exponential back-off + var delayAttempt int + var delay time.Duration + if err == nil { + // check for Retry-After delay, if not present use the client's polling delay + var ok bool + delay, ok = f.GetPollingDelay() + if !ok { + logger.Instance.Writeln(logger.LogInfo, "WaitForCompletionRef: Using client polling delay") + delay = client.PollingDelay + } + } else { + // there was an error polling for status so perform exponential + // back-off based on the number of attempts using the client's retry + // duration. update attempts after delayAttempt to avoid off-by-one. + logger.Instance.Writef(logger.LogError, "WaitForCompletionRef: %s\n", err) + delayAttempt = attempts + delay = client.RetryDuration + attempts++ + } + // wait until the delay elapses or the context is cancelled + delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done()) + if !delayElapsed { + return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") + } + } + return +} + +// MarshalJSON implements the json.Marshaler interface. +func (f Future) MarshalJSON() ([]byte, error) { + return json.Marshal(f.pt) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
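+//
+// Editor's note: a hedged round-trip sketch for persisting an in-flight
+// operation and resuming it later:
+//
+//	b, err := json.Marshal(future) // Future.MarshalJSON
+//	var resumed Future
+//	err = json.Unmarshal(b, &resumed) // restores the underlying polling tracker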
+func (f *Future) UnmarshalJSON(data []byte) error {
+	// unmarshal into JSON object to determine the tracker type
+	obj := map[string]interface{}{}
+	err := json.Unmarshal(data, &obj)
+	if err != nil {
+		return err
+	}
+	if obj["method"] == nil {
+		return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property")
+	}
+	method := obj["method"].(string)
+	switch strings.ToUpper(method) {
+	case http.MethodDelete:
+		f.pt = &pollingTrackerDelete{}
+	case http.MethodPatch:
+		f.pt = &pollingTrackerPatch{}
+	case http.MethodPost:
+		f.pt = &pollingTrackerPost{}
+	case http.MethodPut:
+		f.pt = &pollingTrackerPut{}
+	default:
+		return autorest.NewError("Future", "UnmarshalJSON", "unsupported method '%s'", method)
+	}
+	// now unmarshal into the tracker
+	return json.Unmarshal(data, &f.pt)
+}
+
+// PollingURL returns the URL used for retrieving the status of the long-running operation.
+func (f Future) PollingURL() string {
+	if f.pt == nil {
+		return ""
+	}
+	return f.pt.pollingURL()
+}
+
+// GetResult should be called once polling has completed successfully.
+// It makes the final GET call to retrieve the resultant payload.
+func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) {
+	if f.pt.finalGetURL() == "" {
+		// we can end up in this situation if the async operation returns a 200
+		// with no polling URLs. in that case return the response which should
+		// contain the JSON payload (only do this for successful terminal cases).
+		if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() {
+			return lr, nil
+		}
+		return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result")
+	}
+	req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := sender.Do(req)
+	if err == nil && resp.Body != nil {
+		// copy the body and close it so callers don't have to
+		defer resp.Body.Close()
+		b, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return resp, err
+		}
+		resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+	}
+	return resp, err
+}
+
+type pollingTracker interface {
+	// these methods can differ per tracker
+
+	// checks the response headers and status code to determine the polling mechanism
+	updatePollingMethod() error
+
+	// checks the response for tracker-specific error conditions
+	checkForErrors() error
+
+	// returns true if provisioning state should be checked
+	provisioningStateApplicable() bool
+
+	// methods common to all trackers
+
+	// initializes a tracker's polling URL and method, called for each iteration.
+	// these values can be overridden by each polling tracker as required.
+ initPollingMethod() error + + // initializes the tracker's internal state, call this when the tracker is created + initializeState() error + + // makes an HTTP request to check the status of the LRO + pollForStatus(ctx context.Context, sender autorest.Sender) error + + // updates internal tracker state, call this after each call to pollForStatus + updatePollingState(provStateApl bool) error + + // returns the error response from the service, can be nil + pollingError() error + + // returns the polling method being used + pollingMethod() PollingMethodType + + // returns the state of the LRO as returned from the service + pollingStatus() string + + // returns the URL used for polling status + pollingURL() string + + // returns the URL used for the final GET to retrieve the resource + finalGetURL() string + + // returns true if the LRO is in a terminal state + hasTerminated() bool + + // returns true if the LRO is in a failed terminal state + hasFailed() bool + + // returns true if the LRO is in a successful terminal state + hasSucceeded() bool + + // returns the cached HTTP response after a call to pollForStatus(), can be nil + latestResponse() *http.Response +} + +type pollingTrackerBase struct { + // resp is the last response, either from the submission of the LRO or from polling + resp *http.Response + + // method is the HTTP verb, this is needed for deserialization + Method string `json:"method"` + + // rawBody is the raw JSON response body + rawBody map[string]interface{} + + // denotes if polling is using async-operation or location header + Pm PollingMethodType `json:"pollingMethod"` + + // the URL to poll for status + URI string `json:"pollingURI"` + + // the state of the LRO as returned from the service + State string `json:"lroState"` + + // the URL to GET for the final result + FinalGetURI string `json:"resultURI"` + + // used to hold an error object returned from the service + Err *ServiceError `json:"error,omitempty"` +} + +func (pt *pollingTrackerBase) initializeState() error { + // determine the initial polling state based on response body and/or HTTP status + // code. this is applicable to the initial LRO response, not polling responses! 
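+	// in summary: 200 uses provisioningState when present (else Succeeded);
+	// 201 uses provisioningState when present (else InProgress); 202 is
+	// InProgress; 204 is Succeeded; any other code is Failed.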
+ pt.Method = pt.resp.Request.Method + if err := pt.updateRawBody(); err != nil { + return err + } + switch pt.resp.StatusCode { + case http.StatusOK: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + if pt.hasFailed() { + pt.updateErrorFromResponse() + return pt.pollingError() + } + } else { + pt.State = operationSucceeded + } + case http.StatusCreated: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationInProgress + } + case http.StatusAccepted: + pt.State = operationInProgress + case http.StatusNoContent: + pt.State = operationSucceeded + default: + pt.State = operationFailed + pt.updateErrorFromResponse() + return pt.pollingError() + } + return pt.initPollingMethod() +} + +func (pt pollingTrackerBase) getProvisioningState() *string { + if pt.rawBody != nil && pt.rawBody["properties"] != nil { + p := pt.rawBody["properties"].(map[string]interface{}) + if ps := p["provisioningState"]; ps != nil { + s := ps.(string) + return &s + } + } + return nil +} + +func (pt *pollingTrackerBase) updateRawBody() error { + pt.rawBody = map[string]interface{}{} + if pt.resp.ContentLength != 0 { + defer pt.resp.Body.Close() + b, err := ioutil.ReadAll(pt.resp.Body) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") + } + // put the body back so it's available to other callers + pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty + if len(b) == 0 { + return nil + } + if err = json.Unmarshal(b, &pt.rawBody); err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") + } + } + return nil +} + +func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { + req, err := http.NewRequest(http.MethodGet, pt.URI, nil) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") + } + + req = req.WithContext(ctx) + preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...) + req, err = preparer.Prepare(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request") + } + pt.resp, err = sender.Do(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") + } + if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { + // reset the service error on success case + pt.Err = nil + err = pt.updateRawBody() + } else { + // check response body for error content + pt.updateErrorFromResponse() + err = pt.pollingError() + } + return err +} + +// attempts to unmarshal a ServiceError type from the response body. +// if that fails then make a best attempt at creating something meaningful. +// NOTE: this assumes that the async operation has failed. 
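+// The Default label below is the fallback path that synthesizes a ServiceError
+// from the polling status when the body yields nothing usable.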
+func (pt *pollingTrackerBase) updateErrorFromResponse() { + var err error + if pt.resp.ContentLength != 0 { + type respErr struct { + ServiceError *ServiceError `json:"error"` + } + re := respErr{} + defer pt.resp.Body.Close() + var b []byte + if b, err = ioutil.ReadAll(pt.resp.Body); err != nil { + goto Default + } + // put the body back so it's available to other callers + pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + if len(b) == 0 { + goto Default + } + if err = json.Unmarshal(b, &re); err != nil { + goto Default + } + // unmarshalling the error didn't yield anything, try unwrapped error + if re.ServiceError == nil { + err = json.Unmarshal(b, &re.ServiceError) + if err != nil { + goto Default + } + } + // the unmarshaller will ensure re.ServiceError is non-nil + // even if there was no content unmarshalled so check the code. + if re.ServiceError.Code != "" { + pt.Err = re.ServiceError + return + } + } +Default: + se := &ServiceError{ + Code: pt.pollingStatus(), + Message: "The async operation failed.", + } + if err != nil { + se.InnerError = make(map[string]interface{}) + se.InnerError["unmarshalError"] = err.Error() + } + // stick the response body into the error object in hopes + // it contains something useful to help diagnose the failure. + if len(pt.rawBody) > 0 { + se.AdditionalInfo = []map[string]interface{}{ + pt.rawBody, + } + } + pt.Err = se +} + +func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { + if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { + pt.State = pt.rawBody["status"].(string) + } else { + if pt.resp.StatusCode == http.StatusAccepted { + pt.State = operationInProgress + } else if provStateApl { + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationSucceeded + } + } else { + return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") + } + } + // if the operation has failed update the error state + if pt.hasFailed() { + pt.updateErrorFromResponse() + } + return nil +} + +func (pt pollingTrackerBase) pollingError() error { + if pt.Err == nil { + return nil + } + return pt.Err +} + +func (pt pollingTrackerBase) pollingMethod() PollingMethodType { + return pt.Pm +} + +func (pt pollingTrackerBase) pollingStatus() string { + return pt.State +} + +func (pt pollingTrackerBase) pollingURL() string { + return pt.URI +} + +func (pt pollingTrackerBase) finalGetURL() string { + return pt.FinalGetURI +} + +func (pt pollingTrackerBase) hasTerminated() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) hasFailed() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) +} + +func (pt pollingTrackerBase) hasSucceeded() bool { + return strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) latestResponse() *http.Response { + return pt.resp +} + +// error checking common to all trackers +func (pt pollingTrackerBase) baseCheckForErrors() error { + // for Azure-AsyncOperations the response body cannot be nil or empty + if pt.Pm == PollingAsyncOperation { + if pt.resp.Body == nil || pt.resp.ContentLength == 0 { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") + } + if pt.rawBody["status"] == nil { + return 
autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body") + } + } + return nil +} + +// default initialization of polling URL/method. each verb tracker will update this as required. +func (pt *pollingTrackerBase) initPollingMethod() error { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + return nil + } + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh != "" { + pt.URI = lh + pt.Pm = PollingLocation + return nil + } + // it's ok if we didn't find a polling header, this will be handled elsewhere + return nil +} + +// DELETE + +type pollingTrackerDelete struct { + pollingTrackerBase +} + +func (pt *pollingTrackerDelete) updatePollingMethod() error { + // for 201 the Location header is required + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + } + pt.Pm = PollingLocation + pt.FinalGetURI = pt.URI + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerDelete) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerDelete) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PATCH + +type pollingTrackerPatch struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPatch) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + // note the absence of the "final GET" mechanism for PATCH + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + if ao == "" { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + 
return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } else { + pt.URI = lh + pt.Pm = PollingLocation + } + } + } + return nil +} + +func (pt pollingTrackerPatch) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPatch) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} + +// POST + +type pollingTrackerPost struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPost) updatePollingMethod() error { + // 201 requires Location header + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + pt.FinalGetURI = lh + pt.Pm = PollingLocation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerPost) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPost) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PUT + +type pollingTrackerPut struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPut) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. 
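+		// in other words, a Location parse error is only treated as fatal when
+		// no Azure-AsyncOperation polling URL was found above.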
+		if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
+			return err
+		} else if lh != "" {
+			if ao == "" {
+				pt.URI = lh
+				pt.Pm = PollingLocation
+			}
+		}
+		// make sure a polling URL was found
+		if pt.URI == "" {
+			return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
+		}
+	}
+	return nil
+}
+
+func (pt pollingTrackerPut) checkForErrors() error {
+	err := pt.baseCheckForErrors()
+	if err != nil {
+		return err
+	}
+	// if there are no LRO headers then the body cannot be empty
+	ao, err := getURLFromAsyncOpHeader(pt.resp)
+	if err != nil {
+		return err
+	}
+	lh, err := getURLFromLocationHeader(pt.resp)
+	if err != nil {
+		return err
+	}
+	if ao == "" && lh == "" && len(pt.rawBody) == 0 {
+		return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body")
+	}
+	return nil
+}
+
+func (pt pollingTrackerPut) provisioningStateApplicable() bool {
+	return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated
+}
+
+// creates a polling tracker based on the verb of the original request
+func createPollingTracker(resp *http.Response) (pollingTracker, error) {
+	var pt pollingTracker
+	switch strings.ToUpper(resp.Request.Method) {
+	case http.MethodDelete:
+		pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+	case http.MethodPatch:
+		pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+	case http.MethodPost:
+		pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+	case http.MethodPut:
+		pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+	default:
+		return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method)
+	}
+	if err := pt.initializeState(); err != nil {
+		return pt, err
+	}
+	// this initializes the polling header values, we do this during creation in case the
+	// initial response sends us invalid values; this way the API call will return a non-nil
+	// error (not doing this means the error shows up in Future.Done)
+	return pt, pt.updatePollingMethod()
+}
+
+// gets the polling URL from the Azure-AsyncOperation header.
+// ensures the URL is well-formed and absolute.
+func getURLFromAsyncOpHeader(resp *http.Response) (string, error) {
+	s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation))
+	if s == "" {
+		return "", nil
+	}
+	if !isValidURL(s) {
+		return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s)
+	}
+	return s, nil
+}
+
+// gets the polling URL from the Location header.
+// ensures the URL is well-formed and absolute.
+func getURLFromLocationHeader(resp *http.Response) (string, error) {
+	s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation))
+	if s == "" {
+		return "", nil
+	}
+	if !isValidURL(s) {
+		return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s)
+	}
+	return s, nil
+}
+
+// verify that the URL is valid and absolute
+func isValidURL(s string) bool {
+	u, err := url.Parse(s)
+	return err == nil && u.IsAbs()
+}
+
+// PollingMethodType defines a type used for enumerating polling mechanisms.
+type PollingMethodType string
+
+const (
+	// PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header.
+	PollingAsyncOperation PollingMethodType = "AsyncOperation"
+
+	// PollingLocation indicates the polling method uses the Location header.
+	PollingLocation PollingMethodType = "Location"
+
+	// PollingRequestURI indicates the polling method uses the original request URI.
+	PollingRequestURI PollingMethodType = "RequestURI"
+
+	// PollingUnknown indicates an unknown polling method and is the default value.
+	PollingUnknown PollingMethodType = ""
+)
+
+// AsyncOpIncompleteError is the type that's returned from a future that has not completed.
+type AsyncOpIncompleteError struct {
+	// FutureType is the name of the type composed of an azure.Future.
+	FutureType string
+}
+
+// Error returns an error message including the originating type name of the error.
+func (e AsyncOpIncompleteError) Error() string {
+	return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType)
+}
+
+// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters.
+func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError {
+	return AsyncOpIncompleteError{
+		FutureType: futureType,
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
new file mode 100644
index 000000000..868345db6
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
@@ -0,0 +1,388 @@
+// Package azure provides Azure-specific implementations used with AutoRest.
+// See the included examples for more detail.
+package azure
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+const (
+	// HeaderClientID is the Azure extension header to set a user-specified request ID.
+	HeaderClientID = "x-ms-client-request-id"
+
+	// HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
+	// should be included in the response.
+	HeaderReturnClientID = "x-ms-return-client-request-id"
+
+	// HeaderContentType is the type of the content in the HTTP response.
+	HeaderContentType = "Content-Type"
+
+	// HeaderRequestID is the Azure extension header of the service generated request ID returned
+	// in the response.
+	HeaderRequestID = "x-ms-request-id"
+)
+
+// ServiceError encapsulates the error response from an Azure service.
+// It adheres to the OData v4 specification for error responses.
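+//
+// On the wire the error is usually wrapped in an outer "error" object; this
+// type models the inner object, whose general shape is (values illustrative):
+//
+//	{
+//	  "code": "ResourceNotFound",
+//	  "message": "The resource 'foo' was not found.",
+//	  "target": "foo",
+//	  "details": [ { "code": "...", "message": "..." } ]
+//	}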
+type ServiceError struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details []map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` +} + +func (se ServiceError) Error() string { + result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) + + if se.Target != nil { + result += fmt.Sprintf(" Target=%q", *se.Target) + } + + if se.Details != nil { + d, err := json.Marshal(se.Details) + if err != nil { + result += fmt.Sprintf(" Details=%v", se.Details) + } + result += fmt.Sprintf(" Details=%s", d) + } + + if se.InnerError != nil { + d, err := json.Marshal(se.InnerError) + if err != nil { + result += fmt.Sprintf(" InnerError=%v", se.InnerError) + } + result += fmt.Sprintf(" InnerError=%s", d) + } + + if se.AdditionalInfo != nil { + d, err := json.Marshal(se.AdditionalInfo) + if err != nil { + result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo) + } + result += fmt.Sprintf(" AdditionalInfo=%s", d) + } + + return result +} + +// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type. +func (se *ServiceError) UnmarshalJSON(b []byte) error { + // http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091 + + type serviceErrorInternal struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target,omitempty"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo,omitempty"` + // not all services conform to the OData v4 spec. + // the following fields are where we've seen discrepancies + + // spec calls for []map[string]interface{} but have seen map[string]interface{} + Details interface{} `json:"details,omitempty"` + + // spec calls for map[string]interface{} but have seen []map[string]interface{} and string + InnerError interface{} `json:"innererror,omitempty"` + } + + sei := serviceErrorInternal{} + if err := json.Unmarshal(b, &sei); err != nil { + return err + } + + // copy the fields we know to be correct + se.AdditionalInfo = sei.AdditionalInfo + se.Code = sei.Code + se.Message = sei.Message + se.Target = sei.Target + + // converts an []interface{} to []map[string]interface{} + arrayOfObjs := func(v interface{}) ([]map[string]interface{}, bool) { + arrayOf, ok := v.([]interface{}) + if !ok { + return nil, false + } + final := []map[string]interface{}{} + for _, item := range arrayOf { + as, ok := item.(map[string]interface{}) + if !ok { + return nil, false + } + final = append(final, as) + } + return final, true + } + + // convert the remaining fields, falling back to raw JSON if necessary + + if c, ok := arrayOfObjs(sei.Details); ok { + se.Details = c + } else if c, ok := sei.Details.(map[string]interface{}); ok { + se.Details = []map[string]interface{}{c} + } else if sei.Details != nil { + // stuff into Details + se.Details = []map[string]interface{}{ + {"raw": sei.Details}, + } + } + + if c, ok := sei.InnerError.(map[string]interface{}); ok { + se.InnerError = c + } else if c, ok := arrayOfObjs(sei.InnerError); ok { + // if there's only one error extract it + if len(c) == 1 { + se.InnerError = c[0] + } else { + // multiple errors, stuff them into the value + se.InnerError = map[string]interface{}{ + "multi": c, + } + } + } else if c, ok := sei.InnerError.(string); ok { + se.InnerError = map[string]interface{}{"error": c} + } else if sei.InnerError != nil { + // stuff into InnerError + 
se.InnerError = map[string]interface{}{
+			"raw": sei.InnerError,
+		}
+	}
+	return nil
+}
+
+// RequestError describes an error response returned by an Azure service.
+type RequestError struct {
+	autorest.DetailedError
+
+	// The error returned by the Azure service.
+	ServiceError *ServiceError `json:"error" xml:"Error"`
+
+	// The request id (from the x-ms-request-id header) of the request.
+	RequestID string
+}
+
+// Error returns a human-friendly error message from the service error.
+func (e RequestError) Error() string {
+	return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v",
+		e.StatusCode, e.ServiceError)
+}
+
+// IsAzureError returns true if the passed error is an Azure Service error; false otherwise.
+func IsAzureError(e error) bool {
+	_, ok := e.(*RequestError)
+	return ok
+}
+
+// Resource contains details about an Azure resource.
+type Resource struct {
+	SubscriptionID string
+	ResourceGroup  string
+	Provider       string
+	ResourceType   string
+	ResourceName   string
+}
+
+// String returns the resource ID in its canonical form:
+// /subscriptions/{id}/resourceGroups/{group}/providers/{provider}/{type}/{name}
+func (r Resource) String() string {
+	return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s", r.SubscriptionID, r.ResourceGroup, r.Provider, r.ResourceType, r.ResourceName)
+}
+
+// ParseResourceID parses a resource ID into a Resource struct.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/template-functions-resource?tabs=json#resourceid.
+func ParseResourceID(resourceID string) (Resource, error) {
+
+	const resourceIDPatternText = `(?i)^/subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)$`
+	resourceIDPattern := regexp.MustCompile(resourceIDPatternText)
+	match := resourceIDPattern.FindStringSubmatch(resourceID)
+
+	if len(match) == 0 {
+		return Resource{}, fmt.Errorf("parsing failed for %s: invalid resource ID format", resourceID)
+	}
+
+	v := strings.Split(match[5], "/")
+	resourceName := v[len(v)-1]
+
+	result := Resource{
+		SubscriptionID: match[1],
+		ResourceGroup:  match[2],
+		Provider:       match[3],
+		ResourceType:   match[4],
+		ResourceName:   resourceName,
+	}
+
+	return result, nil
+}
+
+// NewErrorWithError creates a new Error conforming object from the
+// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
+// if resp is nil), message, and original error. message is treated as a format
+// string to which the optional args apply.
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError {
+	if v, ok := original.(*RequestError); ok {
+		return *v
+	}
+
+	statusCode := autorest.UndefinedStatusCode
+	if resp != nil {
+		statusCode = resp.StatusCode
+	}
+	return RequestError{
+		DetailedError: autorest.DetailedError{
+			Original:    original,
+			PackageType: packageType,
+			Method:      method,
+			StatusCode:  statusCode,
+			Message:     fmt.Sprintf(message, args...),
+		},
+	}
+}
+
+// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
+// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id
+// header to true such that UUID accompanies the http.Response.
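+//
+// A minimal usage sketch (URL and UUID are illustrative):
+//
+//	req, _ := http.NewRequest(http.MethodGet, "https://management.azure.com/", nil)
+//	req, err := autorest.CreatePreparer(
+//		WithReturningClientID("0F39878C-5F76-4DB8-A25D-61D2C193C3CA"),
+//	).Prepare(req)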
+func WithReturningClientID(uuid string) autorest.PrepareDecorator { + preparer := autorest.CreatePreparer( + WithClientID(uuid), + WithReturnClientID(true)) + + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + return preparer.Prepare(r) + }) + } +} + +// WithClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). +func WithClientID(uuid string) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderClientID, uuid) +} + +// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-return-client-request-id whose boolean value indicates if the value of the +// x-ms-client-request-id header should be included in the http.Response. +func WithReturnClientID(b bool) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) +} + +// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the +// http.Request sent to the service (and returned in the http.Response) +func ExtractClientID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderClientID, resp) +} + +// ExtractRequestID extracts the Azure server generated request identifier from the +// x-ms-request-id header. +func ExtractRequestID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderRequestID, resp) +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an +// azure.RequestError by reading the response body unless the response HTTP status code +// is among the set passed. +// +// If there is a chance service may return responses other than the Azure error +// format and the response cannot be parsed into an error, a decoding error will +// be returned containing the response body. In any case, the Responder will +// return an error if the status code is not satisfied. +// +// If this Responder returns an error, the response body will be replaced with +// an in-memory reader, which needs no further closing. +func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { + var e RequestError + defer resp.Body.Close() + + encodedAs := autorest.EncodedAsJSON + if strings.Contains(resp.Header.Get("Content-Type"), "xml") { + encodedAs = autorest.EncodedAsXML + } + + // Copy and replace the Body in case it does not contain an error object. + // This will leave the Body available to the caller. 
+ b, decodeErr := autorest.CopyAndDecode(encodedAs, resp.Body, &e) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, decodeErr) + } + if e.ServiceError == nil { + // Check if error is unwrapped ServiceError + decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) + if err := decoder.Decode(&e.ServiceError); err != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, err) + } + + // for example, should the API return the literal value `null` as the response + if e.ServiceError == nil { + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + Details: []map[string]interface{}{ + { + "HttpResponse.Body": b.String(), + }, + }, + } + } + } + + if e.ServiceError != nil && e.ServiceError.Message == "" { + // if we're here it means the returned error wasn't OData v4 compliant. + // try to unmarshal the body in hopes of getting something. + rawBody := map[string]interface{}{} + decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) + if err := decoder.Decode(&rawBody); err != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, err) + } + + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + if len(rawBody) > 0 { + e.ServiceError.Details = []map[string]interface{}{rawBody} + } + } + e.Response = resp + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go new file mode 100644 index 000000000..b0a53769f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -0,0 +1,331 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strings" +) + +const ( + // EnvironmentFilepathName captures the name of the environment variable containing the path to the file + // to be used while populating the Azure Environment. + EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" + + // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud. + NotAvailable = "N/A" +) + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZURECLOUD": PublicCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENT": USGovernmentCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, //TODO: deprecate +} + +// ResourceIdentifier contains a set of Azure resource IDs. 
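+// Each entry is the AAD resource (token audience) to use when acquiring a
+// token for the corresponding service.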
+type ResourceIdentifier struct { + Graph string `json:"graph"` + KeyVault string `json:"keyVault"` + Datalake string `json:"datalake"` + Batch string `json:"batch"` + OperationalInsights string `json:"operationalInsights"` + OSSRDBMS string `json:"ossRDBMS"` + Storage string `json:"storage"` + Synapse string `json:"synapse"` + ServiceBus string `json:"serviceBus"` + SQLDatabase string `json:"sqlDatabase"` + CosmosDB string `json:"cosmosDB"` + ManagedHSM string `json:"managedHSM"` + MicrosoftGraph string `json:"microsoftGraph"` +} + +// Environment represents a set of endpoints for each of Azure's Clouds. +type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + ManagedHSMEndpoint string `json:"managedHSMEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + ServiceBusEndpoint string `json:"serviceBusEndpoint"` + BatchManagementEndpoint string `json:"batchManagementEndpoint"` + MicrosoftGraphEndpoint string `json:"microsoftGraphEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` + MariaDBDNSSuffix string `json:"mariaDBDNSSuffix"` + MySQLDatabaseDNSSuffix string `json:"mySqlDatabaseDNSSuffix"` + PostgresqlDatabaseDNSSuffix string `json:"postgresqlDatabaseDNSSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ManagedHSMDNSSuffix string `json:"managedHSMDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` + TokenAudience string `json:"tokenAudience"` + APIManagementHostNameSuffix string `json:"apiManagementHostNameSuffix"` + SynapseEndpointSuffix string `json:"synapseEndpointSuffix"` + DatalakeSuffix string `json:"datalakeSuffix"` + ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + ManagedHSMEndpoint: "https://managedhsm.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.windows.net/", + BatchManagementEndpoint: "https://batch.core.windows.net/", + MicrosoftGraphEndpoint: "https://graph.microsoft.com/", + StorageEndpointSuffix: "core.windows.net", + CosmosDBDNSSuffix: "documents.azure.com", + MariaDBDNSSuffix: "mariadb.database.azure.com", + MySQLDatabaseDNSSuffix: "mysql.database.azure.com", + 
PostgresqlDatabaseDNSSuffix: "postgres.database.azure.com", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ManagedHSMDNSSuffix: "managedhsm.azure.net", + ServiceBusEndpointSuffix: "servicebus.windows.net", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + ContainerRegistryDNSSuffix: "azurecr.io", + TokenAudience: "https://management.azure.com/", + APIManagementHostNameSuffix: "azure-api.net", + SynapseEndpointSuffix: "dev.azuresynapse.net", + DatalakeSuffix: "azuredatalakestore.net", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.azure.net", + Datalake: "https://datalake.azure.net/", + Batch: "https://batch.core.windows.net/", + OperationalInsights: "https://api.loganalytics.io", + OSSRDBMS: "https://ossrdbms-aad.database.windows.net", + Storage: "https://storage.azure.com/", + Synapse: "https://dev.azuresynapse.net", + ServiceBus: "https://servicebus.azure.net/", + SQLDatabase: "https://database.windows.net/", + CosmosDB: "https://cosmos.azure.com", + ManagedHSM: "https://managedhsm.azure.net", + MicrosoftGraph: "https://graph.microsoft.com/", + }, + } + + // USGovernmentCloud is the cloud environment for the US Government + USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + ManagedHSMEndpoint: NotAvailable, + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", + BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", + MicrosoftGraphEndpoint: "https://graph.microsoft.us/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + CosmosDBDNSSuffix: "documents.azure.us", + MariaDBDNSSuffix: "mariadb.database.usgovcloudapi.net", + MySQLDatabaseDNSSuffix: "mysql.database.usgovcloudapi.net", + PostgresqlDatabaseDNSSuffix: "postgres.database.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ManagedHSMDNSSuffix: NotAvailable, + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.usgovcloudapi.net", + ContainerRegistryDNSSuffix: "azurecr.us", + TokenAudience: "https://management.usgovcloudapi.net/", + APIManagementHostNameSuffix: "azure-api.us", + SynapseEndpointSuffix: "dev.azuresynapse.usgovcloudapi.net", + DatalakeSuffix: NotAvailable, + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.usgovcloudapi.net", + Datalake: NotAvailable, + Batch: "https://batch.core.usgovcloudapi.net/", + OperationalInsights: "https://api.loganalytics.us", + OSSRDBMS: "https://ossrdbms-aad.database.usgovcloudapi.net", + Storage: "https://storage.azure.com/", + Synapse: "https://dev.azuresynapse.usgovcloudapi.net", + ServiceBus: "https://servicebus.azure.net/", + SQLDatabase: 
"https://database.usgovcloudapi.net/", + CosmosDB: "https://cosmos.azure.com", + ManagedHSM: NotAvailable, + MicrosoftGraph: "https://graph.microsoft.us/", + }, + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + ManagedHSMEndpoint: NotAvailable, + GraphEndpoint: "https://graph.chinacloudapi.cn/", + ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", + BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", + MicrosoftGraphEndpoint: "https://microsoftgraph.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + CosmosDBDNSSuffix: "documents.azure.cn", + MariaDBDNSSuffix: "mariadb.database.chinacloudapi.cn", + MySQLDatabaseDNSSuffix: "mysql.database.chinacloudapi.cn", + PostgresqlDatabaseDNSSuffix: "postgres.database.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ManagedHSMDNSSuffix: NotAvailable, + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.chinacloudapi.cn", + ContainerRegistryDNSSuffix: "azurecr.cn", + TokenAudience: "https://management.chinacloudapi.cn/", + APIManagementHostNameSuffix: "azure-api.cn", + SynapseEndpointSuffix: "dev.azuresynapse.azure.cn", + DatalakeSuffix: NotAvailable, + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.chinacloudapi.cn/", + KeyVault: "https://vault.azure.cn", + Datalake: NotAvailable, + Batch: "https://batch.chinacloudapi.cn/", + OperationalInsights: NotAvailable, + OSSRDBMS: "https://ossrdbms-aad.database.chinacloudapi.cn", + Storage: "https://storage.azure.com/", + Synapse: "https://dev.azuresynapse.net", + ServiceBus: "https://servicebus.azure.net/", + SQLDatabase: "https://database.chinacloudapi.cn/", + CosmosDB: "https://cosmos.azure.com", + ManagedHSM: NotAvailable, + MicrosoftGraph: "https://microsoftgraph.chinacloudapi.cn", + }, + } + + // GermanCloud is the cloud environment operated in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: "https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + ManagedHSMEndpoint: NotAvailable, + GraphEndpoint: "https://graph.cloudapi.de/", + ServiceBusEndpoint: "https://servicebus.cloudapi.de/", + BatchManagementEndpoint: "https://batch.cloudapi.de/", + MicrosoftGraphEndpoint: NotAvailable, + StorageEndpointSuffix: "core.cloudapi.de", + CosmosDBDNSSuffix: "documents.microsoftazure.de", + MariaDBDNSSuffix: "mariadb.database.cloudapi.de", + MySQLDatabaseDNSSuffix: "mysql.database.cloudapi.de", + PostgresqlDatabaseDNSSuffix: 
"postgres.database.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ManagedHSMDNSSuffix: NotAvailable, + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + ContainerRegistryDNSSuffix: NotAvailable, + TokenAudience: "https://management.microsoftazure.de/", + APIManagementHostNameSuffix: NotAvailable, + SynapseEndpointSuffix: NotAvailable, + DatalakeSuffix: NotAvailable, + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.cloudapi.de/", + KeyVault: "https://vault.microsoftazure.de", + Datalake: NotAvailable, + Batch: "https://batch.cloudapi.de/", + OperationalInsights: NotAvailable, + OSSRDBMS: "https://ossrdbms-aad.database.cloudapi.de", + Storage: "https://storage.azure.com/", + Synapse: NotAvailable, + ServiceBus: "https://servicebus.azure.net/", + SQLDatabase: "https://database.cloudapi.de/", + CosmosDB: "https://cosmos.azure.com", + ManagedHSM: NotAvailable, + MicrosoftGraph: NotAvailable, + }, + } +) + +// EnvironmentFromName returns an Environment based on the common name specified. +func EnvironmentFromName(name string) (Environment, error) { + // IMPORTANT + // As per @radhikagupta5: + // This is technical debt, fundamentally here because Kubernetes is not currently accepting + // contributions to the providers. Once that is an option, the provider should be updated to + // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation + // from this method based on the name that is provided to us. + if strings.EqualFold(name, "AZURESTACKCLOUD") { + return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) + } + + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) + } + + return env, nil +} + +// EnvironmentFromFile loads an Environment from a configuration file available on disk. +// This function is particularly useful in the Hybrid Cloud model, where one must define their own +// endpoints. +func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { + fileContents, err := ioutil.ReadFile(location) + if err != nil { + return + } + + err = json.Unmarshal(fileContents, &unmarshaled) + + return +} + +// SetEnvironment updates the environment map with the specified values. +func SetEnvironment(name string, env Environment) { + environments[strings.ToUpper(name)] = env +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go new file mode 100644 index 000000000..507f9e95c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go @@ -0,0 +1,245 @@ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +type audience []string + +type authentication struct { + LoginEndpoint string `json:"loginEndpoint"` + Audiences audience `json:"audiences"` +} + +type environmentMetadataInfo struct { + GalleryEndpoint string `json:"galleryEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + PortalEndpoint string `json:"portalEndpoint"` + Authentication authentication `json:"authentication"` +} + +// EnvironmentProperty represent property names that clients can override +type EnvironmentProperty string + +const ( + // EnvironmentName ... + EnvironmentName EnvironmentProperty = "name" + // EnvironmentManagementPortalURL .. + EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL" + // EnvironmentPublishSettingsURL ... + EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL" + // EnvironmentServiceManagementEndpoint ... + EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint" + // EnvironmentResourceManagerEndpoint ... + EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint" + // EnvironmentActiveDirectoryEndpoint ... + EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint" + // EnvironmentGalleryEndpoint ... + EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint" + // EnvironmentKeyVaultEndpoint ... + EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint" + // EnvironmentGraphEndpoint ... + EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint" + // EnvironmentServiceBusEndpoint ... + EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint" + // EnvironmentBatchManagementEndpoint ... + EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint" + // EnvironmentStorageEndpointSuffix ... + EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix" + // EnvironmentSQLDatabaseDNSSuffix ... + EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix" + // EnvironmentTrafficManagerDNSSuffix ... + EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix" + // EnvironmentKeyVaultDNSSuffix ... + EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix" + // EnvironmentServiceBusEndpointSuffix ... + EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" + // EnvironmentServiceManagementVMDNSSuffix ... + EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" + // EnvironmentResourceManagerVMDNSSuffix ... + EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" + // EnvironmentContainerRegistryDNSSuffix ... + EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" + // EnvironmentTokenAudience ... 
+ EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" +) + +// OverrideProperty represents property name and value that clients can override +type OverrideProperty struct { + Key EnvironmentProperty + Value string +} + +// EnvironmentFromURL loads an Environment from a URL +// This function is particularly useful in the Hybrid Cloud model, where one may define their own +// endpoints. +func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { + var metadataEnvProperties environmentMetadataInfo + + if resourceManagerEndpoint == "" { + return environment, fmt.Errorf("Metadata resource manager endpoint is empty") + } + + if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { + return environment, err + } + + // Give priority to user's override values + overrideProperties(&environment, properties) + + if environment.Name == "" { + environment.Name = "HybridEnvironment" + } + stampDNSSuffix := environment.StorageEndpointSuffix + if stampDNSSuffix == "" { + stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") + environment.StorageEndpointSuffix = stampDNSSuffix + } + if environment.KeyVaultDNSSuffix == "" { + environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) + } + if environment.KeyVaultEndpoint == "" { + environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) + } + if environment.TokenAudience == "" { + environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] + } + if environment.ActiveDirectoryEndpoint == "" { + environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint + } + if environment.ResourceManagerEndpoint == "" { + environment.ResourceManagerEndpoint = resourceManagerEndpoint + } + if environment.GalleryEndpoint == "" { + environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint + } + if environment.GraphEndpoint == "" { + environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint + } + + return environment, nil +} + +func overrideProperties(environment *Environment, properties []OverrideProperty) { + for _, property := range properties { + switch property.Key { + case EnvironmentName: + { + environment.Name = property.Value + } + case EnvironmentManagementPortalURL: + { + environment.ManagementPortalURL = property.Value + } + case EnvironmentPublishSettingsURL: + { + environment.PublishSettingsURL = property.Value + } + case EnvironmentServiceManagementEndpoint: + { + environment.ServiceManagementEndpoint = property.Value + } + case EnvironmentResourceManagerEndpoint: + { + environment.ResourceManagerEndpoint = property.Value + } + case EnvironmentActiveDirectoryEndpoint: + { + environment.ActiveDirectoryEndpoint = property.Value + } + case EnvironmentGalleryEndpoint: + { + environment.GalleryEndpoint = property.Value + } + case EnvironmentKeyVaultEndpoint: + { + environment.KeyVaultEndpoint = property.Value + } + case EnvironmentGraphEndpoint: + { + environment.GraphEndpoint = property.Value + } + case EnvironmentServiceBusEndpoint: + { + environment.ServiceBusEndpoint = property.Value + } + case EnvironmentBatchManagementEndpoint: + { + environment.BatchManagementEndpoint = property.Value + } + case EnvironmentStorageEndpointSuffix: + { + environment.StorageEndpointSuffix = property.Value + } + case 
EnvironmentSQLDatabaseDNSSuffix: + { + environment.SQLDatabaseDNSSuffix = property.Value + } + case EnvironmentTrafficManagerDNSSuffix: + { + environment.TrafficManagerDNSSuffix = property.Value + } + case EnvironmentKeyVaultDNSSuffix: + { + environment.KeyVaultDNSSuffix = property.Value + } + case EnvironmentServiceBusEndpointSuffix: + { + environment.ServiceBusEndpointSuffix = property.Value + } + case EnvironmentServiceManagementVMDNSSuffix: + { + environment.ServiceManagementVMDNSSuffix = property.Value + } + case EnvironmentResourceManagerVMDNSSuffix: + { + environment.ResourceManagerVMDNSSuffix = property.Value + } + case EnvironmentContainerRegistryDNSSuffix: + { + environment.ContainerRegistryDNSSuffix = property.Value + } + case EnvironmentTokenAudience: + { + environment.TokenAudience = property.Value + } + } + } +} + +func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { + client := autorest.NewClientWithUserAgent("") + managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") + req, _ := http.NewRequest("GET", managementEndpoint, nil) + response, err := client.Do(req) + if err != nil { + return environment, err + } + defer response.Body.Close() + jsonResponse, err := ioutil.ReadAll(response.Body) + if err != nil { + return environment, err + } + err = json.Unmarshal(jsonResponse, &environment) + return environment, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go new file mode 100644 index 000000000..5b52357f9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -0,0 +1,204 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. +// It also handles request retries +func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := autorest.NewRetriableRequest(r) + for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + + resp, err = autorest.SendWithSender(s, rr.Request(), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return resp, err + } + + if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration { + return resp, err + } + + var re RequestError + if strings.Contains(r.Header.Get("Content-Type"), "xml") { + // XML errors (e.g. 
Storage Data Plane) only return the inner object
+				err = autorest.Respond(resp, autorest.ByUnmarshallingXML(&re.ServiceError))
+			} else {
+				err = autorest.Respond(resp, autorest.ByUnmarshallingJSON(&re))
+			}
+
+			if err != nil {
+				return resp, err
+			}
+			err = re
+
+			if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" {
+				regErr := register(client, r, re)
+				if regErr != nil {
+					return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %w", regErr, err)
+				}
+			}
+		}
+		return resp, err
+	})
+	}
+}
+
+func getProvider(re RequestError) (string, error) {
+	if re.ServiceError != nil && len(re.ServiceError.Details) > 0 {
+		return re.ServiceError.Details[0]["target"].(string), nil
+	}
+	return "", errors.New("provider was not found in the response")
+}
+
+func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
+	subID := getSubscription(originalReq.URL.Path)
+	if subID == "" {
+		return errors.New("missing parameter subscriptionID to register resource provider")
+	}
+	providerName, err := getProvider(re)
+	if err != nil {
+		return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
+	}
+	newURL := url.URL{
+		Scheme: originalReq.URL.Scheme,
+		Host:   originalReq.URL.Host,
+	}
+
+	// taken from the resources SDK
+	// with almost identical code, these sections are easier to maintain
+	// It is also not a good idea to import the SDK here
+	// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
+	pathParameters := map[string]interface{}{
+		"resourceProviderNamespace": autorest.Encode("path", providerName),
+		"subscriptionId":            autorest.Encode("path", subID),
+	}
+
+	const APIVersion = "2016-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(newURL.String()),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
+		autorest.WithQueryParameters(queryParameters),
+	)
+
+	req, err := preparer.Prepare(&http.Request{})
+	if err != nil {
+		return err
+	}
+	req = req.WithContext(originalReq.Context())
+
+	resp, err := autorest.SendWithSender(client, req,
+		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+	)
+	if err != nil {
+		return err
+	}
+
+	type Provider struct {
+		RegistrationState *string `json:"registrationState,omitempty"`
+	}
+	var provider Provider
+
+	err = autorest.Respond(
+		resp,
+		WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&provider),
+		autorest.ByClosing(),
+	)
+	if err != nil {
+		return err
+	}
+
+	// poll for registered provisioning state
+	registrationStartTime := time.Now()
+	for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) {
+		// taken from the resources SDK
+		// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
+		preparer := autorest.CreatePreparer(
+			autorest.AsGet(),
+			autorest.WithBaseURL(newURL.String()),
+			autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters),
+			autorest.WithQueryParameters(queryParameters),
+		)
+		req, err = preparer.Prepare(&http.Request{})
+		if err != nil {
return err + } + req = req.WithContext(originalReq.Context()) + + resp, err := autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return err + } + + err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + if provider.RegistrationState != nil && + *provider.RegistrationState == "Registered" { + break + } + + delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done()) + if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) { + return originalReq.Context().Err() + } + } + if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) { + return errors.New("polling for resource provider registration has exceeded the polling duration") + } + return err +} + +func getSubscription(path string) string { + parts := strings.Split(path, "/") + for i, v := range parts { + if v == "subscriptions" && (i+1) < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go new file mode 100644 index 000000000..bb5f9396e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -0,0 +1,328 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "strings" + "time" + + "github.com/Azure/go-autorest/logger" +) + +const ( + // DefaultPollingDelay is a reasonable delay between polling requests. + DefaultPollingDelay = 30 * time.Second + + // DefaultPollingDuration is a reasonable total polling duration. + DefaultPollingDuration = 15 * time.Minute + + // DefaultRetryAttempts is number of attempts for retry status codes (5xx). + DefaultRetryAttempts = 3 + + // DefaultRetryDuration is the duration to wait between retries. + DefaultRetryDuration = 30 * time.Second +) + +var ( + // StatusCodesForRetry are a defined group of status code for which the client will retry + StatusCodesForRetry = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } +) + +const ( + requestFormat = `HTTP Request Begin =================================================== +%s +===================================================== HTTP Request End +` + responseFormat = `HTTP Response Begin =================================================== +%s +===================================================== HTTP Response End +` +) + +// Response serves as the base for all responses from generated clients. 
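For reference alongside the vendored rp.go: a minimal sketch (not part of the vendored sources; the management URL and subscription ID are placeholders, and a real request would also carry an Authorizer) of how a caller attaches DoRetryWithRegistration when sending an ARM request.

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	client := autorest.NewClientWithUserAgent("vfs-cache-example")

	// Placeholder ARM URL; the /subscriptions/{id}/ segment is what
	// getSubscription extracts if provider registration becomes necessary.
	req, err := http.NewRequest(http.MethodGet,
		"https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups?api-version=2016-09-01", nil)
	if err != nil {
		panic(err)
	}

	// On a 409 MissingSubscriptionRegistration error the decorator registers
	// the provider and then retries the original request.
	resp, err := autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client))
	if err != nil {
		fmt.Println("send failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}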
+// It provides access to the last http.Response.
+type Response struct {
+	*http.Response `json:"-"`
+}
+
+// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code.
+// If there was no response (i.e. the underlying http.Response is nil) the return value is false.
+func (r Response) IsHTTPStatus(statusCode int) bool {
+	if r.Response == nil {
+		return false
+	}
+	return r.Response.StatusCode == statusCode
+}
+
+// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes.
+// If there was no response (i.e. the underlying http.Response is nil) or no status codes are provided
+// the return value is false.
+func (r Response) HasHTTPStatus(statusCodes ...int) bool {
+	return ResponseHasStatusCode(r.Response, statusCodes...)
+}
+
+// LoggingInspector implements request and response inspectors that log the full request and
+// response to a supplied log.
+type LoggingInspector struct {
+	Logger *log.Logger
+}
+
+// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) WithInspection() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			var body, b bytes.Buffer
+
+			defer r.Body.Close()
+
+			r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
+			if err := r.Write(&b); err != nil {
+				return nil, fmt.Errorf("Failed to write request: %v", err)
+			}
+
+			li.Logger.Printf(requestFormat, b.String())
+
+			r.Body = ioutil.NopCloser(&body)
+			return p.Prepare(r)
+		})
+	}
+}
+
+// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) ByInspecting() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			var body, b bytes.Buffer
+			defer resp.Body.Close()
+			resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body))
+			if err := resp.Write(&b); err != nil {
+				return fmt.Errorf("Failed to write response: %v", err)
+			}
+
+			li.Logger.Printf(responseFormat, b.String())
+
+			resp.Body = ioutil.NopCloser(&body)
+			return r.Respond(resp)
+		})
+	}
+}
+
+// Client is the base for autorest generated clients. It provides default, "do nothing"
+// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the
+// standard, undecorated http.Client as a default Sender.
+//
+// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and
+// return responses that compose with Response.
+//
+// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom
+// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit
+// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence
+// sending the request by providing a decorated Sender.
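A sketch (illustrative only; the URL is a placeholder) of wiring LoggingInspector into a client. The request is given a small body because WithInspection tees and closes r.Body.

package main

import (
	"log"
	"net/http"
	"os"
	"strings"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	li := autorest.LoggingInspector{Logger: log.New(os.Stderr, "autorest: ", 0)}

	c := autorest.NewClientWithUserAgent("inspector-demo")
	c.RequestInspector = li.WithInspection() // dump the outgoing request
	c.ResponseInspector = li.ByInspecting()  // dump the incoming response

	// A non-nil body, since WithInspection reads and then restores Body.
	req, _ := http.NewRequest(http.MethodPost, "https://example.com/echo",
		strings.NewReader(`{"ping":true}`))

	if resp, err := c.Do(req); err == nil {
		resp.Body.Close()
	}
}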
+type Client struct { + Authorizer Authorizer + Sender Sender + RequestInspector PrepareDecorator + ResponseInspector RespondDecorator + + // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header + PollingDelay time.Duration + + // PollingDuration sets the maximum polling time after which an error is returned. + // Setting this to zero will use the provided context to control the duration. + PollingDuration time.Duration + + // RetryAttempts sets the total number of times the client will attempt to make an HTTP request. + // Set the value to 1 to disable retries. DO NOT set the value to less than 1. + RetryAttempts int + + // RetryDuration sets the delay duration for retries. + RetryDuration time.Duration + + // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent + // through the Do method. + UserAgent string + + Jar http.CookieJar + + // Set to true to skip attempted registration of resource providers (false by default). + SkipResourceProviderRegistration bool + + // SendDecorators can be used to override the default chain of SendDecorators. + // This can be used to specify things like a custom retry SendDecorator. + // Set this to an empty slice to use no SendDecorators. + SendDecorators []SendDecorator +} + +// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed +// string. +func NewClientWithUserAgent(ua string) Client { + return newClient(ua, tls.RenegotiateNever) +} + +// ClientOptions contains various Client configuration options. +type ClientOptions struct { + // UserAgent is an optional user-agent string to append to the default user agent. + UserAgent string + + // Renegotiation is an optional setting to control client-side TLS renegotiation. + Renegotiation tls.RenegotiationSupport +} + +// NewClientWithOptions returns an instance of a Client with the specified values. +func NewClientWithOptions(options ClientOptions) Client { + return newClient(options.UserAgent, options.Renegotiation) +} + +func newClient(ua string, renegotiation tls.RenegotiationSupport) Client { + c := Client{ + PollingDelay: DefaultPollingDelay, + PollingDuration: DefaultPollingDuration, + RetryAttempts: DefaultRetryAttempts, + RetryDuration: DefaultRetryDuration, + UserAgent: UserAgent(), + } + c.Sender = c.sender(renegotiation) + c.AddToUserAgent(ua) + return c +} + +// AddToUserAgent adds an extension to the current user agent +func (c *Client) AddToUserAgent(extension string) error { + if extension != "" { + c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) +} + +// Do implements the Sender interface by invoking the active Sender after applying authorization. +// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent +// is set, apply set the User-Agent header. +func (c Client) Do(r *http.Request) (*http.Response, error) { + if r.UserAgent() == "" { + r, _ = Prepare(r, + WithUserAgent(c.UserAgent)) + } + // NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations + r, err := Prepare(r, + c.WithAuthorization(), + c.WithInspection()) + if err != nil { + var resp *http.Response + if detErr, ok := err.(DetailedError); ok { + // if the authorization failed (e.g. invalid credentials) there will + // be a response associated with the error, be sure to return it. 
+ resp = detErr.Response + } + return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") + } + logger.Instance.WriteRequest(r, logger.Filter{ + Header: func(k string, v []string) (bool, []string) { + // remove the auth token from the log + if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") { + v = []string{"**REDACTED**"} + } + return true, v + }, + }) + resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r) + if resp == nil && err == nil { + err = errors.New("autorest: received nil response and error") + } + logger.Instance.WriteResponse(resp, logger.Filter{}) + Respond(resp, c.ByInspecting()) + return resp, err +} + +// sender returns the Sender to which to send requests. +func (c Client) sender(renengotiation tls.RenegotiationSupport) Sender { + if c.Sender == nil { + return sender(renengotiation) + } + return c.Sender +} + +// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator +// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer. +func (c Client) WithAuthorization() PrepareDecorator { + return c.authorizer().WithAuthorization() +} + +// authorizer returns the Authorizer to use. +func (c Client) authorizer() Authorizer { + if c.Authorizer == nil { + return NullAuthorizer{} + } + return c.Authorizer +} + +// WithInspection is a convenience method that passes the request to the supplied RequestInspector, +// if present, or returns the WithNothing PrepareDecorator otherwise. +func (c Client) WithInspection() PrepareDecorator { + if c.RequestInspector == nil { + return WithNothing() + } + return c.RequestInspector +} + +// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, +// if present, or returns the ByIgnoring RespondDecorator otherwise. +func (c Client) ByInspecting() RespondDecorator { + if c.ResponseInspector == nil { + return ByIgnoring() + } + return c.ResponseInspector +} + +// Send sends the provided http.Request using the client's Sender or the default sender. +// It returns the http.Response and possible error. It also accepts a, possibly empty, +// default set of SendDecorators used when sending the request. +// SendDecorators have the following precedence: +// 1. In a request's context via WithSendDecorators() +// 2. Specified on the client in SendDecorators +// 3. The default values specified in this method +func (c Client) Send(req *http.Request, decorators ...SendDecorator) (*http.Response, error) { + if c.SendDecorators != nil { + decorators = c.SendDecorators + } + inCtx := req.Context().Value(ctxSendDecorators{}) + if sd, ok := inCtx.([]SendDecorator); ok { + decorators = sd + } + return SendWithSender(c, req, decorators...) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go new file mode 100644 index 000000000..c45710656 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go @@ -0,0 +1,96 @@ +/* +Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) +defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of +time.Time types. And both convert to time.Time through a ToTime method. 
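A quick sketch of the intended round-trip (illustrative values; assumes the standard encoding/json and fmt packages are imported):

	d, _ := date.ParseDate("2024-10-04")
	b, _ := json.Marshal(d) // "2024-10-04"

	var d2 date.Date
	_ = json.Unmarshal(b, &d2)
	fmt.Println(d2.ToTime().Equal(d.ToTime())) // true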
+*/ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "time" +) + +const ( + fullDate = "2006-01-02" + fullDateJSON = `"2006-01-02"` + dateFormat = "%04d-%02d-%02d" + jsonFormat = `"%04d-%02d-%02d"` +) + +// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., +// 2006-01-02). +type Date struct { + time.Time +} + +// ParseDate create a new Date from the passed string. +func ParseDate(date string) (d Date, err error) { + return parseDate(date, fullDate) +} + +func parseDate(date string, format string) (Date, error) { + d, err := time.Parse(format, date) + return Date{Time: d}, err +} + +// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalBinary() ([]byte, error) { + return d.MarshalText() +} + +// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalBinary(data []byte) error { + return d.UnmarshalText(data) +} + +// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalJSON() (json []byte, err error) { + return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalJSON(data []byte) (err error) { + d.Time, err = time.Parse(fullDateJSON, string(data)) + return err +} + +// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalText() (text []byte, err error) { + return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalText(data []byte) (err error) { + d.Time, err = time.Parse(fullDate, string(data)) + return err +} + +// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). +func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go new file mode 100644 index 000000000..4e0543207 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 000000000..b453fad04 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,103 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). 
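A sketch (illustrative timestamp) of the fallback described above: a value without a zone offset parses via the Azure UTC layout, and String() re-emits RFC3339 through MarshalText.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	// No 'Z' and no offset, so UnmarshalJSON selects the azureUtcFormat layout.
	var t date.Time
	if err := json.Unmarshal([]byte(`"2024-10-04T14:18:18.123"`), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.String()) // 2024-10-04T14:18:18.123Z
}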
+func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 000000000..48fb39ba9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,100 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). 
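A sketch of TimeRFC1123 inside a JSON payload (the struct, field, and value are invented for illustration):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

// payload is a hypothetical response carrying an RFC1123 timestamp.
type payload struct {
	LastModified date.TimeRFC1123 `json:"lastModified"`
}

func main() {
	var p payload
	raw := []byte(`{"lastModified":"Fri, 04 Oct 2024 14:18:18 GMT"}`)
	if err := json.Unmarshal(raw, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.LastModified.ToTime().UTC()) // 2024-10-04 14:18:18 +0000 UTC
}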
+func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalBinary() ([]byte, error) { + return t.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// ToTime returns a Time as a time.Time +func (t TimeRFC1123) ToTime() time.Time { + return t.Time +} + +// String returns the Time formatted as an RFC1123 date-time string (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) String() string { + // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go new file mode 100644 index 000000000..7073959b2 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go @@ -0,0 +1,123 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "time" +) + +// unixEpoch is the moment in time that should be treated as timestamp 0. +var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) + +// UnixTime marshals and unmarshals a time that is represented as the number +// of seconds (ignoring skip-seconds) since the Unix Epoch. +type UnixTime time.Time + +// Duration returns the time as a Duration since the UnixEpoch. +func (t UnixTime) Duration() time.Duration { + return time.Time(t).Sub(unixEpoch) +} + +// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. +func NewUnixTimeFromSeconds(seconds float64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) +} + +// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. +func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(nanoseconds)) +} + +// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. +func NewUnixTimeFromDuration(dur time.Duration) UnixTime { + return UnixTime(unixEpoch.Add(dur)) +} + +// UnixEpoch retreives the moment considered the Unix Epoch. I.e. The time represented by '0' +func UnixEpoch() time.Time { + return unixEpoch +} + +// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements. +// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.) 
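A sketch of the seconds-based wire format (values invented): UnixTime marshals to a JSON number of fractional seconds since the epoch, and Duration() recovers the offset.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	// 36 hours and half a second past the epoch, as fractional seconds.
	t := date.NewUnixTimeFromSeconds(129600.5)

	b, err := json.Marshal(t)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", b) // 129600.5

	var back date.UnixTime
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Duration()) // 36h0m0.5s
}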
+func (t UnixTime) MarshalJSON() ([]byte, error) { + buffer := &bytes.Buffer{} + enc := json.NewEncoder(buffer) + err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) + if err != nil { + return nil, err + } + return buffer.Bytes(), nil +} + +// UnmarshalJSON reconstitures a UnixTime saved as a JSON number of the number of seconds since +// midnight January 1st, 1970. +func (t *UnixTime) UnmarshalJSON(text []byte) error { + dec := json.NewDecoder(bytes.NewReader(text)) + + var secondsSinceEpoch float64 + if err := dec.Decode(&secondsSinceEpoch); err != nil { + return err + } + + *t = NewUnixTimeFromSeconds(secondsSinceEpoch) + + return nil +} + +// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number. +func (t UnixTime) MarshalText() ([]byte, error) { + cast := time.Time(t) + return cast.MarshalText() +} + +// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch. +func (t *UnixTime) UnmarshalText(raw []byte) error { + var unmarshaled time.Time + + if err := unmarshaled.UnmarshalText(raw); err != nil { + return err + } + + *t = UnixTime(unmarshaled) + return nil +} + +// MarshalBinary converts a UnixTime into a binary.LittleEndian float64 of nanoseconds since the epoch. +func (t UnixTime) MarshalBinary() ([]byte, error) { + buf := &bytes.Buffer{} + + payload := int64(t.Duration()) + + if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// UnmarshalBinary converts a from a binary.LittleEndian float64 of nanoseconds since the epoch into a UnixTime. +func (t *UnixTime) UnmarshalBinary(raw []byte) error { + var nanosecondsSinceEpoch int64 + + if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { + return err + } + *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go new file mode 100644 index 000000000..12addf0eb --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go @@ -0,0 +1,25 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "strings" + "time" +) + +// ParseTime to parse Time string to specified format. +func ParseTime(format string, t string) (d time.Time, err error) { + return time.Parse(format, strings.ToUpper(t)) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go new file mode 100644 index 000000000..35098eda8 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/error.go @@ -0,0 +1,103 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"net/http"
+)
+
+const (
+	// UndefinedStatusCode is used when HTTP status code is not available for an error.
+	UndefinedStatusCode = 0
+)
+
+// DetailedError encloses an error with details of the package, method, and associated HTTP
+// status code (if any).
+type DetailedError struct {
+	Original error
+
+	// PackageType is the package type of the object emitting the error. For types, the value
+	// matches that produced by the '%T' format specifier of the fmt package. For other elements,
+	// such as functions, it is just the package name (e.g., "autorest").
+	PackageType string
+
+	// Method is the name of the method raising the error.
+	Method string
+
+	// StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error.
+	StatusCode interface{}
+
+	// Message is the error message.
+	Message string
+
+	// ServiceError is the response body of the failed API call, in bytes.
+	ServiceError []byte
+
+	// Response is the response object that was returned during failure if applicable.
+	Response *http.Response
+}
+
+// NewError creates a new Error conforming object from the passed packageType, method, and
+// message. message is treated as a format string to which the optional args apply.
+func NewError(packageType string, method string, message string, args ...interface{}) DetailedError {
+	return NewErrorWithError(nil, packageType, method, nil, message, args...)
+}
+
+// NewErrorWithResponse creates a new Error conforming object from the passed
+// packageType, method, statusCode of the given resp (UndefinedStatusCode if
+// resp is nil), and message. message is treated as a format string to which the
+// optional args apply.
+func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+	return NewErrorWithError(nil, packageType, method, resp, message, args...)
+}
+
+// NewErrorWithError creates a new Error conforming object from the
+// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
+// if resp is nil), message, and original error. message is treated as a format
+// string to which the optional args apply.
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+	if v, ok := original.(DetailedError); ok {
+		return v
+	}
+
+	statusCode := UndefinedStatusCode
+	if resp != nil {
+		statusCode = resp.StatusCode
+	}
+
+	return DetailedError{
+		Original:    original,
+		PackageType: packageType,
+		Method:      method,
+		StatusCode:  statusCode,
+		Message:     fmt.Sprintf(message, args...),
+		Response:    resp,
+	}
+}
+
+// Error returns a formatted string containing all available details (i.e., PackageType, Method,
+// StatusCode, Message, and original error (if any)).
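A sketch of how callers typically wrap and inspect failures (package and method names invented):

package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	cause := errors.New("connection reset")
	resp := &http.Response{StatusCode: http.StatusBadGateway}

	// Attach package, method, and status context to the low-level failure.
	err := autorest.NewErrorWithError(cause, "example.Client", "Get", resp, "request failed")
	fmt.Println(err)
	// example.Client#Get: request failed: StatusCode=502 -- Original Error: connection reset

	// Unwrap exposes the original error to errors.Is / errors.As.
	fmt.Println(errors.Is(err, cause)) // true
}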
+func (e DetailedError) Error() string { + if e.Original == nil { + return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) + } + return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) +} + +// Unwrap returns the original error. +func (e DetailedError) Unwrap() error { + return e.Original +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go new file mode 100644 index 000000000..792f82d4b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go @@ -0,0 +1,25 @@ +//go:build modhack +// +build modhack + +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go new file mode 100644 index 000000000..121a66fa8 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -0,0 +1,549 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "strings" +) + +const ( + mimeTypeJSON = "application/json" + mimeTypeOctetStream = "application/octet-stream" + mimeTypeFormPost = "application/x-www-form-urlencoded" + + headerAuthorization = "Authorization" + headerAuxAuthorization = "x-ms-authorization-auxiliary" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" +) + +// used as a key type in context.WithValue() +type ctxPrepareDecorators struct{} + +// WithPrepareDecorators adds the specified PrepareDecorators to the provided context. +// If no PrepareDecorators are provided the context is unchanged. 
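A sketch of stashing per-call decorators in a context and applying them later (the header name is invented):

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Carry a per-call decorator in the context...
	ctx := autorest.WithPrepareDecorators(context.Background(),
		[]autorest.PrepareDecorator{autorest.WithHeader("X-Example-Trace", "42")})

	// ...and retrieve it later, falling back to a default when absent.
	decorators := autorest.GetPrepareDecorators(ctx, autorest.WithNothing())

	req, _ := http.NewRequest(http.MethodGet, "https://example.com/", nil)
	req, err := autorest.Prepare(req.WithContext(ctx), decorators...)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("X-Example-Trace")) // 42
}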
+func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context { + if len(prepareDecorator) == 0 { + return ctx + } + return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator) +} + +// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators. +func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator { + inCtx := ctx.Value(ctxPrepareDecorators{}) + if pd, ok := inCtx.([]PrepareDecorator); ok { + return pd + } + return defaultPrepareDecorators +} + +// Preparer is the interface that wraps the Prepare method. +// +// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations +// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. +type Preparer interface { + Prepare(*http.Request) (*http.Request, error) +} + +// PreparerFunc is a method that implements the Preparer interface. +type PreparerFunc func(*http.Request) (*http.Request, error) + +// Prepare implements the Preparer interface on PreparerFunc. +func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { + return pf(r) +} + +// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then affect the result. +type PrepareDecorator func(Preparer) Preparer + +// CreatePreparer creates, decorates, and returns a Preparer. +// Without decorators, the returned Preparer returns the passed http.Request unmodified. +// Preparers are safe to share and re-use. +func CreatePreparer(decorators ...PrepareDecorator) Preparer { + return DecoratePreparer( + Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), + decorators...) +} + +// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it +// applies to the Preparer. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (change the http.Request and then pass it +// along) or a post-decorator (pass the http.Request along and alter it on return). +func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { + for _, decorate := range decorators { + p = decorate(p) + } + return p +} + +// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. +// It creates a Preparer from the decorators which it then applies to the passed http.Request. +func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { + if r == nil { + return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") + } + return CreatePreparer(decorators...).Prepare(r) +} + +// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed +// http.Request. +func WithNothing() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return p.Prepare(r) + }) + } +} + +// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to +// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before +// adding the header. 
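A sketch mirroring the register() calls in rp.go above: composing a Preparer from decorators and applying it to an empty request (the subscription ID and API version are placeholders):

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	pathParameters := map[string]interface{}{
		"subscriptionId": autorest.Encode("path", "00000000-0000-0000-0000-000000000000"),
	}
	queryParameters := map[string]interface{}{
		"api-version": "2016-09-01",
	}

	// The decorators set the verb, base URL, expanded path, and query string.
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL("https://management.azure.com"),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers", pathParameters),
		autorest.WithQueryParameters(queryParameters),
	)

	req, err := preparer.Prepare(&http.Request{})
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL.String())
	// GET https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers?api-version=2016-09-01
}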
+func WithHeader(header string, value string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + setHeader(r, http.CanonicalHeaderKey(header), value) + } + return r, err + }) + } +} + +// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to +// the passed value. It canonicalizes the passed headers name (via http.CanonicalHeaderKey) before +// adding them. +func WithHeaders(headers map[string]interface{}) PrepareDecorator { + h := ensureValueStrings(headers) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + + for name, value := range h { + r.Header.Set(http.CanonicalHeaderKey(name), value) + } + } + return r, err + }) + } +} + +// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the supplied token. +func WithBearerAuthorization(token string) PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) +} + +// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value +// is the passed contentType. +func AsContentType(contentType string) PrepareDecorator { + return WithHeader(headerContentType, contentType) +} + +// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the +// passed string. +func WithUserAgent(ua string) PrepareDecorator { + return WithHeader(headerUserAgent, ua) +} + +// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/x-www-form-urlencoded". +func AsFormURLEncoded() PrepareDecorator { + return AsContentType(mimeTypeFormPost) +} + +// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/json". +func AsJSON() PrepareDecorator { + return AsContentType(mimeTypeJSON) +} + +// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header. +func AsOctetStream() PrepareDecorator { + return AsContentType(mimeTypeOctetStream) +} + +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The +// decorator does not validate that the passed method string is a known HTTP method. +func WithMethod(method string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Method = method + return p.Prepare(r) + }) + } +} + +// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. +func AsDelete() PrepareDecorator { return WithMethod("DELETE") } + +// AsGet returns a PrepareDecorator that sets the HTTP method to GET. +func AsGet() PrepareDecorator { return WithMethod("GET") } + +// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. +func AsHead() PrepareDecorator { return WithMethod("HEAD") } + +// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE. +func AsMerge() PrepareDecorator { return WithMethod("MERGE") } + +// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. +func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } + +// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. 
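A sketch showing that the As* helpers are thin wrappers over WithMethod:

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	r1, _ := autorest.Prepare(&http.Request{}, autorest.AsPatch())
	r2, _ := autorest.Prepare(&http.Request{}, autorest.WithMethod("PATCH"))
	fmt.Println(r1.Method, r2.Method) // PATCH PATCH
}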
+func AsPatch() PrepareDecorator { return WithMethod("PATCH") }
+
+// AsPost returns a PrepareDecorator that sets the HTTP method to POST.
+func AsPost() PrepareDecorator { return WithMethod("POST") }
+
+// AsPut returns a PrepareDecorator that sets the HTTP method to PUT.
+func AsPut() PrepareDecorator { return WithMethod("PUT") }
+
+// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed
+// from the supplied baseURL. Query parameters will be encoded as required.
+func WithBaseURL(baseURL string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				var u *url.URL
+				if u, err = url.Parse(baseURL); err != nil {
+					return r, err
+				}
+				if u.Scheme == "" {
+					return r, fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
+				}
+				if u.RawQuery != "" {
+					// handle unencoded semicolons (ideally the server would send them already encoded)
+					u.RawQuery = strings.Replace(u.RawQuery, ";", "%3B", -1)
+					q, err := url.ParseQuery(u.RawQuery)
+					if err != nil {
+						return r, err
+					}
+					u.RawQuery = q.Encode()
+				}
+				r.URL = u
+			}
+			return r, err
+		})
+	}
+}
+
+// WithBytes returns a PrepareDecorator that takes a slice of bytes and passes it directly to the
+// request body, setting the Content-Length accordingly.
+func WithBytes(input *[]byte) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if input == nil {
+					return r, fmt.Errorf("Input Bytes was nil")
+				}
+
+				r.ContentLength = int64(len(*input))
+				r.Body = ioutil.NopCloser(bytes.NewReader(*input))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
+func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
+	parameters := ensureValueStrings(urlParameters)
+	for key, value := range parameters {
+		baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1)
+	}
+	return WithBaseURL(baseURL)
+}
+
+// WithFormData returns a PrepareDecorator that "URL encodes" (e.g., bar=baz&foo=quux) the supplied
+// url.Values into the http.Request body.
+func WithFormData(v url.Values) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				s := v.Encode()
+
+				setHeader(r, http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
+				r.ContentLength = int64(len(s))
+				r.Body = ioutil.NopCloser(strings.NewReader(s))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithMultiPartFormData returns a PrepareDecorator that encodes the passed form parameters, file
+// content included, as multipart/form-data in the http.Request body.
+func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				var body bytes.Buffer
+				writer := multipart.NewWriter(&body)
+				for key, value := range formDataParameters {
+					if rc, ok := value.(io.ReadCloser); ok {
+						var fd io.Writer
+						if fd, err = writer.CreateFormFile(key, key); err != nil {
+							return r, err
+						}
+						if _, err = io.Copy(fd, rc); err != nil {
+							return r, err
+						}
+					} else {
+						if err = writer.WriteField(key, ensureValueString(value)); err != nil {
+							return r, err
+						}
+					}
+				}
+				if err = writer.Close(); err != nil {
+					return r, err
+				}
+				setHeader(r, http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType())
+				r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
+				r.ContentLength = int64(body.Len())
+				return r, err
+			}
+			return r, err
+		})
+	}
+}
+
+// WithFile returns a PrepareDecorator that sends the file in the request body.
+func WithFile(f io.ReadCloser) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				b, err := ioutil.ReadAll(f)
+				if err != nil {
+					return r, err
+				}
+				r.Body = ioutil.NopCloser(bytes.NewReader(b))
+				r.ContentLength = int64(len(b))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request
+// and sets the Content-Length header.
+func WithBool(v bool) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the
+// request and sets the Content-Length header.
+func WithFloat32(v float32) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the
+// request and sets the Content-Length header.
+func WithFloat64(v float64) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request
+// and sets the Content-Length header.
+func WithInt32(v int32) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request
+// and sets the Content-Length header.
+func WithInt64(v int64) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithString returns a PrepareDecorator that encodes the passed string into the body of the request
+// and sets the Content-Length header.
+func WithString(v string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				r.ContentLength = int64(len(v))
+				r.Body = ioutil.NopCloser(strings.NewReader(v))
+			}
+			return r, err
+		})
+	}
+}
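A second hedged sketch (again not vendored code) showing the body decorator above working together with the path and query decorators that follow; the URL, path, and values are illustrative only:

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	type widget struct {
		Name string `json:"name"`
	}
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsPost(),
		autorest.AsJSON(),
		autorest.WithBaseURL("https://example.com"), // placeholder
		autorest.WithPathParameters("/widgets/{id}", map[string]interface{}{"id": 42}),
		autorest.WithQueryParameters(map[string]interface{}{"api-version": "2024-01-01"}),
		autorest.WithJSON(widget{Name: "demo"}))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(req.Method, req.URL) // POST https://example.com/widgets/42?api-version=2024-01-01
}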
+// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the
+// request and sets the Content-Length header.
+func WithJSON(v interface{}) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				b, err := json.Marshal(v)
+				if err == nil {
+					r.ContentLength = int64(len(b))
+					r.Body = ioutil.NopCloser(bytes.NewReader(b))
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the
+// request and sets the Content-Length header.
+func WithXML(v interface{}) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				b, err := xml.Marshal(v)
+				if err == nil {
+					// we have to tack on an XML header
+					withHeader := xml.Header + string(b)
+					bytesWithHeader := []byte(withHeader)
+
+					r.ContentLength = int64(len(bytesWithHeader))
+					setHeader(r, headerContentLength, fmt.Sprintf("%d", len(bytesWithHeader)))
+					r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader))
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path
+// is absolute (that is, it begins with a "/"), it replaces the existing path.
+func WithPath(path string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithPath", "Invoked with a nil URL")
+				}
+				if r.URL, err = parseURL(r.URL, path); err != nil {
+					return r, err
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The
+// values will be escaped (aka URL encoded) before insertion into the path.
+func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
+	parameters := escapeValueStrings(ensureValueStrings(pathParameters))
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL")
+				}
+				for key, value := range parameters {
+					path = strings.Replace(path, "{"+key+"}", value, -1)
+				}
+				if r.URL, err = parseURL(r.URL, path); err != nil {
+					return r, err
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map.
+func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
+	parameters := ensureValueStrings(pathParameters)
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL")
+				}
+				for key, value := range parameters {
+					path = strings.Replace(path, "{"+key+"}", value, -1)
+				}
+
+				if r.URL, err = parseURL(r.URL, path); err != nil {
+					return r, err
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+func parseURL(u *url.URL, path string) (*url.URL, error) {
+	p := strings.TrimRight(u.String(), "/")
+	if !strings.HasPrefix(path, "/") {
+		path = "/" + path
+	}
+	return url.Parse(p + path)
+}
+
+// WithQueryParameters returns a PrepareDecorator that encodes and applies the query parameters
+// given in the supplied map (i.e., key=value).
+func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator {
+	parameters := MapToValues(queryParameters)
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
+				}
+				v := r.URL.Query()
+				for key, value := range parameters {
+					for i := range value {
+						d, err := url.QueryUnescape(value[i])
+						if err != nil {
+							return r, err
+						}
+						value[i] = d
+					}
+					v[key] = value
+				}
+				r.URL.RawQuery = v.Encode()
+			}
+			return r, err
+		})
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go
new file mode 100644
index 000000000..349e1963a
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go
@@ -0,0 +1,269 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+)
+
+// Responder is the interface that wraps the Respond method.
+//
+// Respond accepts and reacts to an http.Response. Implementations must not share or hold
+// state since Responders may be shared and re-used.
+type Responder interface {
+	Respond(*http.Response) error
+}
+
+// ResponderFunc is a method that implements the Responder interface.
+type ResponderFunc func(*http.Response) error
+
+// Respond implements the Responder interface on ResponderFunc.
+func (rf ResponderFunc) Respond(r *http.Response) error {
+	return rf(r)
+}
+
+// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to
+// the http.Response and pass it along or, first, pass the http.Response along then react.
+type RespondDecorator func(Responder) Responder
+// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned
+// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share
+// and re-use; it depends on the applied decorators. For example, a standard decorator that closes
+// the response body is fine to share whereas a decorator that reads the body into a passed struct
+// is not.
+//
+// To prevent memory leaks, ensure that at least one Responder closes the response body.
+func CreateResponder(decorators ...RespondDecorator) Responder {
+	return DecorateResponder(
+		Responder(ResponderFunc(func(r *http.Response) error { return nil })),
+		decorators...)
+}
+
+// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it
+// applies to the Responder. Decorators are applied in the order received, but their effect upon the
+// response depends on whether they are a pre-decorator (react to the http.Response and then pass it
+// along) or a post-decorator (pass the http.Response along and then react).
+func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder {
+	for _, decorate := range decorators {
+		r = decorate(r)
+	}
+	return r
+}
+
+// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators.
+// It creates a Responder from the decorators which it then applies to the passed http.Response.
+func Respond(r *http.Response, decorators ...RespondDecorator) error {
+	if r == nil {
+		return nil
+	}
+	return CreateResponder(decorators...).Respond(r)
+}
+
+// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined
+// to the next RespondDecorator.
+func ByIgnoring() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			return r.Respond(resp)
+		})
+	}
+}
+
+// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as
+// the Body is read.
+func ByCopying(b *bytes.Buffer) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && resp != nil && resp.Body != nil {
+				resp.Body = TeeReadCloser(resp.Body, b)
+			}
+			return err
+		})
+	}
+}
+
+// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which
+// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed
+// Responder is invoked prior to discarding the response body, the decorator may occur anywhere
+// within the set.
+func ByDiscardingBody() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && resp != nil && resp.Body != nil {
+				if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
+					return fmt.Errorf("Error discarding the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
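A hedged sketch (not vendored code) of a typical responder chain; the endpoint is a placeholder and the decode target illustrative. WithErrorUnlessStatusCode and the remaining By* decorators used here are defined just below:

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	resp, err := http.Get("https://example.com") // placeholder endpoint
	if err != nil {
		fmt.Println(err)
		return
	}
	var result map[string]interface{}
	// Decorators run in the order listed: check the status code, decode the
	// body, then close it (ByClosing guards against leaking the connection).
	err = autorest.Respond(resp,
		autorest.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	fmt.Println(result, err)
}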
+// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it
+// closes the response body. Since the passed Responder is invoked prior to closing the response
+// body, the decorator may occur anywhere within the set.
+func ByClosing() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if resp != nil && resp.Body != nil {
+				if err := resp.Body.Close(); err != nil {
+					return fmt.Errorf("Error closing the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which
+// it closes the response if the passed Responder returns an error and the response body exists.
+func ByClosingIfError() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err != nil && resp != nil && resp.Body != nil {
+				if err := resp.Body.Close(); err != nil {
+					return fmt.Errorf("Error closing the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingBytes returns a RespondDecorator that copies the bytes returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingBytes(v *[]byte) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				bytes, errInner := ioutil.ReadAll(resp.Body)
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else {
+					*v = bytes
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingJSON(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				// Some responses might include a BOM, remove for successful unmarshalling
+				b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else if len(strings.Trim(string(b), " ")) > 0 {
+					errInner = json.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingXML returns a RespondDecorator that decodes an XML document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingXML(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else {
+					errInner = xml.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response
+// StatusCode is among the set passed. On error, the response body is fully read into a buffer and
+// presented in the returned error, as well as in the response body.
+func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && !ResponseHasStatusCode(resp, codes...) {
+				derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+				if resp.Body != nil {
+					defer resp.Body.Close()
+					b, _ := ioutil.ReadAll(resp.Body)
+					derr.ServiceError = b
+					resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+				}
+				err = derr
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is
+// anything other than HTTP 200.
+func WithErrorUnlessOK() RespondDecorator {
+	return WithErrorUnlessStatusCode(http.StatusOK)
+}
+
+// ExtractHeader extracts all values of the specified header from the http.Response. It returns an
+// empty string slice if the passed http.Response is nil or the header does not exist.
+func ExtractHeader(header string, resp *http.Response) []string {
+	if resp != nil && resp.Header != nil {
+		return resp.Header[http.CanonicalHeaderKey(header)]
+	}
+	return nil
+}
+
+// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It
+// returns an empty string if the passed http.Response is nil or the header does not exist.
+func ExtractHeaderValue(header string, resp *http.Response) string {
+	h := ExtractHeader(header, resp)
+	if len(h) > 0 {
+		return h[0]
+	}
+	return ""
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
new file mode 100644
index 000000000..fa11dbed7
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
@@ -0,0 +1,52 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// NewRetriableRequest returns a wrapper around an HTTP request that supports retry logic.
+func NewRetriableRequest(req *http.Request) *RetriableRequest {
+	return &RetriableRequest{req: req}
+}
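To make the intent of this type concrete, here is a hedged sketch (not vendored code) of the rewind pattern the retry decorators in sender.go rely on; the endpoint, payload, and loop are simplified placeholders:

package main

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, _ := http.NewRequest("POST", "https://example.com", strings.NewReader("payload")) // placeholder
	rr := autorest.NewRetriableRequest(req)
	for attempt := 0; attempt < 3; attempt++ {
		// Prepare rewinds (or re-creates) the body so each attempt sends
		// the full payload even though the transport closed it last time.
		if err := rr.Prepare(); err != nil {
			fmt.Println(err)
			return
		}
		resp, err := http.DefaultClient.Do(rr.Request())
		if err == nil {
			resp.Body.Close()
			return
		}
		fmt.Println("attempt failed:", err)
	}
}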
+// Request returns the wrapped HTTP request.
+func (rr *RetriableRequest) Request() *http.Request {
+	return rr.req
+}
+
+func (rr *RetriableRequest) prepareFromByteReader() (err error) {
+	// fall back to making a copy (only do this once)
+	b := []byte{}
+	if rr.req.ContentLength > 0 {
+		b = make([]byte, rr.req.ContentLength)
+		_, err = io.ReadFull(rr.req.Body, b)
+		if err != nil {
+			return err
+		}
+	} else {
+		b, err = ioutil.ReadAll(rr.req.Body)
+		if err != nil {
+			return err
+		}
+	}
+	rr.br = bytes.NewReader(b)
+	rr.req.Body = ioutil.NopCloser(rr.br)
+	return err
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
new file mode 100644
index 000000000..4c87030e8
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
@@ -0,0 +1,55 @@
+//go:build !go1.8
+// +build !go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+	"bytes"
+	"io/ioutil"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+	req *http.Request
+	br  *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.br != nil {
+			_, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
+			rr.req.Body = ioutil.NopCloser(rr.br)
+		}
+		if err != nil {
+			return err
+		}
+		if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
new file mode 100644
index 000000000..05847c08b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
@@ -0,0 +1,67 @@
+//go:build go1.8
+// +build go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+	req *http.Request
+	rc  io.ReadCloser
+	br  *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.rc != nil {
+			rr.req.Body = rr.rc
+		} else if rr.br != nil {
+			_, err = rr.br.Seek(0, io.SeekStart)
+			rr.req.Body = ioutil.NopCloser(rr.br)
+		}
+		if err != nil {
+			return err
+		}
+		if rr.req.GetBody != nil {
+			// this will allow us to preserve the body without having to
+			// make a copy. note we need to do this on each iteration
+			rr.rc, err = rr.req.GetBody()
+			if err != nil {
+				return err
+			}
+		} else if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.GetBody = nil
+	req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go
new file mode 100644
index 000000000..118de8141
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -0,0 +1,458 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"log"
+	"math"
+	"net"
+	"net/http"
+	"net/http/cookiejar"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/Azure/go-autorest/logger"
+	"github.com/Azure/go-autorest/tracing"
+)
+
+// there is one sender per TLS renegotiation type, i.e. count of tls.RenegotiationSupport enums
+const defaultSendersCount = 3
+
+type defaultSender struct {
+	sender Sender
+	init   *sync.Once
+}
+
+// each type of sender will be created on demand in sender()
+var defaultSenders [defaultSendersCount]defaultSender
+
+func init() {
+	for i := 0; i < defaultSendersCount; i++ {
+		defaultSenders[i].init = &sync.Once{}
+	}
+}
+
+// used as a key type in context.WithValue()
+type ctxSendDecorators struct{}
+
+// WithSendDecorators adds the specified SendDecorators to the provided context.
+// If no SendDecorators are provided the context is unchanged.
+func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context {
+	if len(sendDecorator) == 0 {
+		return ctx
+	}
+	return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator)
+}
+
+// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators.
+func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator {
+	inCtx := ctx.Value(ctxSendDecorators{})
+	if sd, ok := inCtx.([]SendDecorator); ok {
+		return sd
+	}
+	return defaultSendDecorators
+}
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(sender(tls.RenegotiateNever), decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
+
+// Send sends, by means of the default http.Client, the passed http.Request, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// Send is a convenience method and not recommended for production. Advanced users should use
+// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
+//
+// Send will not poll or retry requests.
+func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return SendWithSender(sender(tls.RenegotiateNever), r, decorators...)
+}
+
+// SendWithSender sends the passed http.Request, through the provided Sender, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// SendWithSender will not poll or retry requests.
+func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return DecorateSender(s, decorators...).Do(r)
+}
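Because *http.Client and SenderFunc both satisfy Sender, a canned SenderFunc makes a convenient stub when exercising decorators without network I/O. A hedged sketch (not vendored code); DoErrorUnlessStatusCode is defined further below:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// A canned Sender: no network I/O, always returns 200 OK.
	stub := autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
		return &http.Response{
			Request:    r,
			StatusCode: http.StatusOK,
			Status:     "200 OK",
			Body:       ioutil.NopCloser(strings.NewReader("ok")),
		}, nil
	})
	req, _ := http.NewRequest("GET", "https://example.com", nil) // placeholder
	resp, err := autorest.SendWithSender(stub, req,
		autorest.DoErrorUnlessStatusCode(http.StatusOK))
	fmt.Println(resp.Status, err) // 200 OK <nil>
}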
+func sender(renegotiation tls.RenegotiationSupport) Sender {
+	// note that we can't init defaultSenders in init() since it will
+	// execute before calling code has had a chance to enable tracing
+	defaultSenders[renegotiation].init.Do(func() {
+		// copied from http.DefaultTransport with a TLS minimum version.
+		transport := &http.Transport{
+			Proxy: http.ProxyFromEnvironment,
+			DialContext: (&net.Dialer{
+				Timeout:   30 * time.Second,
+				KeepAlive: 30 * time.Second,
+			}).DialContext,
+			ForceAttemptHTTP2:     true,
+			MaxIdleConns:          100,
+			IdleConnTimeout:       90 * time.Second,
+			TLSHandshakeTimeout:   10 * time.Second,
+			ExpectContinueTimeout: 1 * time.Second,
+			TLSClientConfig: &tls.Config{
+				MinVersion:    tls.VersionTLS12,
+				Renegotiation: renegotiation,
+			},
+		}
+		var roundTripper http.RoundTripper = transport
+		if tracing.IsEnabled() {
+			roundTripper = tracing.NewTransport(transport)
+		}
+		j, _ := cookiejar.New(nil)
+		defaultSenders[renegotiation].sender = &http.Client{Jar: j, Transport: roundTripper}
+	})
+	return defaultSenders[renegotiation].sender
+}
+
+// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
+// invoking the Sender. The delay may be terminated by cancelling the context on the
+// http.Request. If canceled, no further Senders are invoked.
+func AfterDelay(d time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			if !DelayForBackoff(d, 0, r.Context().Done()) {
+				return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
+			}
+			return s.Do(r)
+		})
+	}
+}
+
+// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request.
+func AsIs() SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return s.Do(r)
+		})
+	}
+}
+
+// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which
+// it closes the response if the passed Sender returns an error and the response body exists.
+func DoCloseIfError() SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err != nil {
+				Respond(resp, ByDiscardingBody(), ByClosing())
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is
+// among the set passed. Since these are artificial errors, the response body may still require
+// closing.
+func DoErrorIfStatusCode(codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err == nil && ResponseHasStatusCode(resp, codes...) {
+				err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response
+// StatusCode is among the set passed. Since these are artificial errors, the response body
+// may still require closing.
+func DoErrorUnlessStatusCode(codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err == nil && !ResponseHasStatusCode(resp, codes...) {
+				err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the
+// passed status codes. It expects the http.Response to contain a Location header providing the
+// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than
+// the supplied duration. It will delay between requests for the duration specified in the
+// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by
+// cancelling the context on the http.Request.
+func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			resp, err = s.Do(r)
+
+			if err == nil && ResponseHasStatusCode(resp, codes...) {
+				r, err = NewPollingRequestWithContext(r.Context(), resp)
+
+				for err == nil && ResponseHasStatusCode(resp, codes...) {
+					Respond(resp,
+						ByDiscardingBody(),
+						ByClosing())
+					resp, err = SendWithSender(s, r,
+						AfterDelay(GetRetryAfter(resp, delay)))
+				}
+			}
+
+			return resp, err
+		})
+	}
+}
+
+// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified
+// number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on
+// the http.Request.
+func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			rr := NewRetriableRequest(r)
+			for attempt := 0; attempt < attempts; attempt++ {
+				err = rr.Prepare()
+				if err != nil {
+					return resp, err
+				}
+				DrainResponseBody(resp)
+				resp, err = s.Do(rr.Request())
+				if err == nil {
+					return resp, err
+				}
+				logger.Instance.Writef(logger.LogError, "DoRetryForAttempts: received error for attempt %d: %v\n", attempt+1, err)
+				if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+					return nil, r.Context().Err()
+				}
+			}
+			return resp, err
+		})
+	}
+}
+
+// Count429AsRetry indicates that a 429 response should be included as a retry attempt.
+var Count429AsRetry = true
+
+// Max429Delay is the maximum duration to wait between retries on a 429 if no Retry-After header was received.
+var Max429Delay time.Duration
+
+// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified
+// number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request.
+// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts.
+func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, 0, codes...)
+		})
+	}
+}
+
+// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the
+// specified number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater
+// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request.
+func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, cap, codes...)
+		})
+	}
+}
+
+func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) {
+	rr := NewRetriableRequest(r)
+	// Increment to add the first call (attempts denotes number of retries)
+	for attempt, delayCount := 0, 0; attempt < attempts+1; {
+		err = rr.Prepare()
+		if err != nil {
+			return
+		}
+		DrainResponseBody(resp)
+		resp, err = s.Do(rr.Request())
+		// we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication
+		// resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
+		if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
+			return resp, err
+		}
+		if err != nil {
+			logger.Instance.Writef(logger.LogError, "DoRetryForStatusCodes: received error for attempt %d: %v\n", attempt+1, err)
+		}
+		delayed := DelayWithRetryAfter(resp, r.Context().Done())
+		// if this was a 429 set the delay cap as specified.
+		// applicable only in the absence of a retry-after header.
+		if resp != nil && resp.StatusCode == http.StatusTooManyRequests {
+			cap = Max429Delay
+		}
+		if !delayed && !DelayForBackoffWithCap(backoff, cap, delayCount, r.Context().Done()) {
+			return resp, r.Context().Err()
+		}
+		// when count429 == false don't count a 429 against the number
+		// of attempts so that we continue to retry until it succeeds
+		if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) {
+			attempt++
+		}
+		// delay count is tracked separately from attempts to
+		// ensure that 429 participates in exponential back-off
+		delayCount++
+	}
+	return resp, err
+}
+
+// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header.
+// The value of Retry-After can be either the number of seconds or a date in RFC1123 format.
+// The function returns true after successfully waiting for the specified duration. If there is
+// no Retry-After header or the wait is cancelled the return value is false.
+func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool {
+	if resp == nil {
+		return false
+	}
+	var dur time.Duration
+	ra := resp.Header.Get("Retry-After")
+	if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
+		dur = time.Duration(retryAfter) * time.Second
+	} else if t, err := time.Parse(time.RFC1123, ra); err == nil {
+		dur = t.Sub(time.Now())
+	}
+	if dur > 0 {
+		select {
+		case <-time.After(dur):
+			return true
+		case <-cancel:
+			return false
+		}
+	}
+	return false
+}
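Putting the retry decorators above together, a hedged sketch (not vendored code) of wiring retry-on-status into a send; the status codes, attempt count, and backoff are illustrative:

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, _ := http.NewRequest("GET", "https://example.com", nil) // placeholder
	// Retry up to 3 times on 429/503, backing off exponentially from 2s;
	// a Retry-After header, when present, overrides the computed delay.
	resp, err := autorest.SendWithSender(&http.Client{}, req,
		autorest.DoRetryForStatusCodes(3, 2*time.Second,
			http.StatusTooManyRequests, http.StatusServiceUnavailable))
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}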
+// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal
+// to or greater than the specified duration, exponentially backing off between requests using the
+// supplied backoff time.Duration (which may be zero). Retrying may be canceled by cancelling the
+// context on the http.Request.
+func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			rr := NewRetriableRequest(r)
+			end := time.Now().Add(d)
+			for attempt := 0; time.Now().Before(end); attempt++ {
+				err = rr.Prepare()
+				if err != nil {
+					return resp, err
+				}
+				DrainResponseBody(resp)
+				resp, err = s.Do(rr.Request())
+				if err == nil {
+					return resp, err
+				}
+				logger.Instance.Writef(logger.LogError, "DoRetryForDuration: received error for attempt %d: %v\n", attempt+1, err)
+				if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+					return nil, r.Context().Err()
+				}
+			}
+			return resp, err
+		})
+	}
+}
+
+// WithLogging returns a SendDecorator that implements simple before and after logging of the
+// request.
+func WithLogging(logger *log.Logger) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			logger.Printf("Sending %s %s", r.Method, r.URL)
+			resp, err := s.Do(r)
+			if err != nil {
+				logger.Printf("%s %s received error '%v'", r.Method, r.URL, err)
+			} else {
+				logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of
+// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be set
+// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early,
+// returns false.
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
+// count.
+func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
+	return DelayForBackoffWithCap(backoff, 0, attempt, cancel)
+}
+
+// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of
+// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be set
+// to zero for no delay. To cap the maximum possible delay specify a value greater than zero for cap.
+// The delay may be canceled by closing the passed channel. If terminated early, returns false.
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
+// count.
+func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool {
+	d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second
+	if cap > 0 && d > cap {
+		d = cap
+	}
+	logger.Instance.Writef(logger.LogInfo, "DelayForBackoffWithCap: sleeping for %s\n", d)
+	select {
+	case <-time.After(d):
+		return true
+	case <-cancel:
+		return false
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go
new file mode 100644
index 000000000..d35b3850a
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go
@@ -0,0 +1,232 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"reflect"
+	"strings"
+)
+
+// EncodedAs is a series of constants specifying various data encodings
+type EncodedAs string
+
+const (
+	// EncodedAsJSON states that data is encoded as JSON
+	EncodedAsJSON EncodedAs = "JSON"
+
+	// EncodedAsXML states that data is encoded as XML
+	EncodedAsXML EncodedAs = "XML"
+)
+
+// Decoder defines the decoding method json.Decoder and xml.Decoder share
+type Decoder interface {
+	Decode(v interface{}) error
+}
+
+// NewDecoder creates a new decoder appropriate to the passed encoding.
+// encodedAs specifies the type of encoding and r supplies the io.Reader containing the
+// encoded data.
+func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder {
+	if encodedAs == EncodedAsJSON {
+		return json.NewDecoder(r)
+	} else if encodedAs == EncodedAsXML {
+		return xml.NewDecoder(r)
+	}
+	return nil
+}
+
+// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy
+// is especially useful if there is a chance the data will fail to decode.
+// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v
+// is the decoding destination.
+func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (b bytes.Buffer, err error) {
+	err = NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v)
+	return
+}
+
+// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc.
+// It utilizes io.TeeReader to copy the data read and has the same behavior when reading.
+// Further, when it is closed, it ensures that rc is closed as well.
+func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser {
+	return &teeReadCloser{rc, io.TeeReader(rc, w)}
+}
+
+type teeReadCloser struct {
+	rc io.ReadCloser
+	r  io.Reader
+}
+
+func (t *teeReadCloser) Read(p []byte) (int, error) {
+	return t.r.Read(p)
+}
+
+func (t *teeReadCloser) Close() error {
+	return t.rc.Close()
+}
+
+func containsInt(ints []int, n int) bool {
+	for _, i := range ints {
+		if i == n {
+			return true
+		}
+	}
+	return false
+}
+
+func escapeValueStrings(m map[string]string) map[string]string {
+	for key, value := range m {
+		m[key] = url.QueryEscape(value)
+	}
+	return m
+}
+
+func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string {
+	mapOfStrings := make(map[string]string)
+	for key, value := range mapOfInterface {
+		mapOfStrings[key] = ensureValueString(value)
+	}
+	return mapOfStrings
+}
+
+func ensureValueString(value interface{}) string {
+	if value == nil {
+		return ""
+	}
+	switch v := value.(type) {
+	case string:
+		return v
+	case []byte:
+		return string(v)
+	default:
+		return fmt.Sprintf("%v", v)
+	}
+}
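A hedged sketch (not vendored code) of CopyAndDecode, which keeps the raw bytes available even when decoding fails; the JSON literal is illustrative:

package main

import (
	"fmt"
	"strings"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	payload := strings.NewReader(`{"name": "demo"}`)
	var v struct {
		Name string `json:"name"`
	}
	// On a decode error the buffer still holds the bytes read so far,
	// which makes the bad payload available for logging.
	b, err := autorest.CopyAndDecode(autorest.EncodedAsJSON, payload, &v)
	fmt.Println(v.Name, err, b.String())
}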
+// MapToValues method converts map[string]interface{} to url.Values.
+func MapToValues(m map[string]interface{}) url.Values {
+	v := url.Values{}
+	for key, value := range m {
+		x := reflect.ValueOf(value)
+		if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
+			for i := 0; i < x.Len(); i++ {
+				v.Add(key, ensureValueString(x.Index(i)))
+			}
+		} else {
+			v.Add(key, ensureValueString(value))
+		}
+	}
+	return v
+}
+
+// AsStringSlice method converts interface{} to []string.
+// s must be of type slice or array or an error is returned.
+// Each element of s will be converted to its string representation.
+func AsStringSlice(s interface{}) ([]string, error) {
+	v := reflect.ValueOf(s)
+	if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
+		return nil, NewError("autorest", "AsStringSlice", "the value's type is not a slice or array.")
+	}
+	stringSlice := make([]string, 0, v.Len())
+
+	for i := 0; i < v.Len(); i++ {
+		stringSlice = append(stringSlice, fmt.Sprintf("%v", v.Index(i)))
+	}
+	return stringSlice, nil
+}
+
+// String method converts the interface v to a string. If v is a slice or array, it
+// joins the elements using the separator. Note that only sep[0] will be used for
+// joining if any separator is specified.
+func String(v interface{}, sep ...string) string {
+	if len(sep) == 0 {
+		return ensureValueString(v)
+	}
+	stringSlice, ok := v.([]string)
+	if !ok {
+		var err error
+		stringSlice, err = AsStringSlice(v)
+		if err != nil {
+			panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err))
+		}
+	}
+	return ensureValueString(strings.Join(stringSlice, sep[0]))
+}
+
+// Encode method encodes url path and query parameters.
+func Encode(location string, v interface{}, sep ...string) string {
+	s := String(v, sep...)
+	switch strings.ToLower(location) {
+	case "path":
+		return pathEscape(s)
+	case "query":
+		return queryEscape(s)
+	default:
+		return s
+	}
+}
+
+func pathEscape(s string) string {
+	return strings.Replace(url.QueryEscape(s), "+", "%20", -1)
+}
+
+func queryEscape(s string) string {
+	return url.QueryEscape(s)
+}
+
+// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't).
+// This is mainly useful for long-running operations that use the Azure-AsyncOperation
+// header, so we change the initial PUT into a GET to retrieve the final result.
+func ChangeToGet(req *http.Request) *http.Request {
+	req.Method = "GET"
+	req.Body = nil
+	req.ContentLength = 0
+	req.Header.Del("Content-Length")
+	return req
+}
+
+// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false
+// if it's not. If the error doesn't implement the net.Error interface the return value is true.
+func IsTemporaryNetworkError(err error) bool {
+	if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) {
+		return true
+	}
+	return false
+}
+
+// DrainResponseBody reads the response body then closes it.
+func DrainResponseBody(resp *http.Response) error { + if resp != nil && resp.Body != nil { + _, err := io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + return err + } + return nil +} + +func setHeader(r *http.Request, key, value string) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(key, value) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go new file mode 100644 index 000000000..3133fcc08 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go @@ -0,0 +1,30 @@ +//go:build go1.13 +// +build go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "errors" + + "github.com/Azure/go-autorest/autorest/adal" +) + +// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError interface. +func IsTokenRefreshError(err error) bool { + var tre adal.TokenRefreshError + return errors.As(err, &tre) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go new file mode 100644 index 000000000..851e152db --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go @@ -0,0 +1,32 @@ +//go:build !go1.13 +// +build !go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import "github.com/Azure/go-autorest/autorest/adal" + +// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError +// interface. If err is a DetailedError it will walk the chain of Original errors. +func IsTokenRefreshError(err error) bool { + if _, ok := err.(adal.TokenRefreshError); ok { + return true + } + if de, ok := err.(DetailedError); ok { + return IsTokenRefreshError(de.Original) + } + return false +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go new file mode 100644 index 000000000..713e23581 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -0,0 +1,41 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "runtime" +) + +const number = "v14.2.1" + +var ( + userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. +func UserAgent() string { + return userAgent +} + +// Version returns the semantic version (see http://semver.org). +func Version() string { + return number +} diff --git a/vendor/github.com/Azure/go-autorest/azure-pipelines.yml b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml new file mode 100644 index 000000000..6fb8404fd --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml @@ -0,0 +1,105 @@ +variables: + GOPATH: '$(system.defaultWorkingDirectory)/work' + sdkPath: '$(GOPATH)/src/github.com/$(build.repository.name)' + +jobs: + - job: 'goautorest' + displayName: 'Run go-autorest CI Checks' + + strategy: + matrix: + Linux_Go113: + vm.image: 'ubuntu-18.04' + go.version: '1.13' + Linux_Go114: + vm.image: 'ubuntu-18.04' + go.version: '1.14' + + pool: + vmImage: '$(vm.image)' + + steps: + - task: GoTool@0 + inputs: + version: '$(go.version)' + displayName: "Select Go Version" + + - script: | + set -e + mkdir -p '$(GOPATH)/bin' + mkdir -p '$(sdkPath)' + shopt -s extglob + mv !(work) '$(sdkPath)' + echo '##vso[task.prependpath]$(GOPATH)/bin' + displayName: 'Create Go Workspace' + + - script: | + set -e + curl -sSL https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure -v + go install ./vendor/golang.org/x/lint/golint + go get github.com/jstemmer/go-junit-report + go get github.com/axw/gocov/gocov + go get github.com/AlekSi/gocov-xml + go get -u github.com/matm/gocov-html + workingDirectory: '$(sdkPath)' + displayName: 'Install Dependencies' + + - script: | + go vet ./autorest/... + go vet ./logger/... + go vet ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Vet' + + - script: | + go build -v ./autorest/... + go build -v ./logger/... + go build -v ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Build' + + - script: | + set -e + go test -race -v -coverprofile=coverage.txt -covermode atomic ./autorest/... ./logger/... ./tracing/... 2>&1 | go-junit-report > report.xml + gocov convert coverage.txt > coverage.json + gocov-xml < coverage.json > coverage.xml + gocov-html < coverage.json > coverage.html + workingDirectory: '$(sdkPath)' + displayName: 'Run Tests' + + - script: grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Copyright Header Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + gofmt -s -l -w ./autorest/. >&2 + gofmt -s -l -w ./logger/. >&2 + gofmt -s -l -w ./tracing/. >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Format Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + golint ./autorest/... >&2 + golint ./logger/... >&2 + golint ./tracing/... 
>&2 + workingDirectory: '$(sdkPath)' + displayName: 'Linter Check' + failOnStderr: true + condition: succeededOrFailed() + + - task: PublishTestResults@2 + inputs: + testRunner: JUnit + testResultsFiles: $(sdkPath)/report.xml + failTaskOnFailedTests: true + + - task: PublishCodeCoverageResults@1 + inputs: + codeCoverageTool: Cobertura + summaryFileLocation: $(sdkPath)/coverage.xml + additionalCodeCoverageFiles: $(sdkPath)/coverage.html diff --git a/vendor/github.com/Azure/go-autorest/doc.go b/vendor/github.com/Azure/go-autorest/doc.go new file mode 100644 index 000000000..99ae6ca98 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/doc.go @@ -0,0 +1,18 @@ +/* +Package go-autorest provides an HTTP request client for use with Autorest-generated API client packages. +*/ +package go_autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go new file mode 100644 index 000000000..0aa27680d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. 
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go new file mode 100644 index 000000000..2f5d8cc1a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/logger.go @@ -0,0 +1,337 @@ +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" +) + +// LevelType tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. +type LevelType uint32 + +const ( + // LogNone tells a logger not to log any entries passed to it. + LogNone LevelType = iota + + // LogFatal tells a logger to log all LogFatal entries passed to it. + LogFatal + + // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. + LogPanic + + // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. + LogError + + // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogWarning + + // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogInfo + + // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogDebug + + // LogAuth is a special case of LogDebug, it tells a logger to also log the body of an authentication request and response. + // NOTE: this can disclose sensitive information, use with care. + LogAuth +) + +const ( + logNone = "NONE" + logFatal = "FATAL" + logPanic = "PANIC" + logError = "ERROR" + logWarning = "WARNING" + logInfo = "INFO" + logDebug = "DEBUG" + logAuth = "AUTH" + logUnknown = "UNKNOWN" +) + +// ParseLevel converts the specified string into the corresponding LevelType. +func ParseLevel(s string) (lt LevelType, err error) { + switch strings.ToUpper(s) { + case logFatal: + lt = LogFatal + case logPanic: + lt = LogPanic + case logError: + lt = LogError + case logWarning: + lt = LogWarning + case logInfo: + lt = LogInfo + case logDebug: + lt = LogDebug + case logAuth: + lt = LogAuth + default: + err = fmt.Errorf("bad log level '%s'", s) + } + return +} + +// String implements the stringer interface for LevelType. 
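+//
+// Illustrative round-trip (editor's sketch; not part of the upstream file):
+// String is the inverse of ParseLevel for every named level.
+//
+//	lvl, _ := logger.ParseLevel("warning") // lvl == logger.LogWarning
+//	s := lvl.String()                      // s == "WARNING"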
+func (lt LevelType) String() string { + switch lt { + case LogNone: + return logNone + case LogFatal: + return logFatal + case LogPanic: + return logPanic + case LogError: + return logError + case LogWarning: + return logWarning + case LogInfo: + return logInfo + case LogDebug: + return logDebug + case LogAuth: + return logAuth + default: + return logUnknown + } +} + +// Filter defines functions for filtering HTTP request/response content. +type Filter struct { + // URL returns a potentially modified string representation of a request URL. + URL func(u *url.URL) string + + // Header returns a potentially modified set of values for the specified key. + // To completely exclude the header key/values return false. + Header func(key string, val []string) (bool, []string) + + // Body returns a potentially modified request/response body. + Body func(b []byte) []byte +} + +func (f Filter) processURL(u *url.URL) string { + if f.URL == nil { + return u.String() + } + return f.URL(u) +} + +func (f Filter) processHeader(k string, val []string) (bool, []string) { + if f.Header == nil { + return true, val + } + return f.Header(k, val) +} + +func (f Filter) processBody(b []byte) []byte { + if f.Body == nil { + return b + } + return f.Body(b) +} + +// Writer defines methods for writing to a logging facility. +type Writer interface { + // Writeln writes the specified message with the standard log entry header and new-line character. + Writeln(level LevelType, message string) + + // Writef writes the specified format specifier with the standard log entry header and no new-line character. + Writef(level LevelType, format string, a ...interface{}) + + // WriteRequest writes the specified HTTP request to the logger if the log level is greater than + // or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no request content is excluded. + WriteRequest(req *http.Request, filter Filter) + + // WriteResponse writes the specified HTTP response to the logger if the log level is greater than + // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no response content is excluded. + WriteResponse(resp *http.Response, filter Filter) +} + +// Instance is the default log writer initialized during package init. +// This can be replaced with a custom implementation as required. +var Instance Writer + +// default log level +var logLevel = LogNone + +// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL. +// If no value was specified the default value is LogNone. +// Custom loggers can call this to retrieve the configured log level. 
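+//
+// Hypothetical usage (editor's addition, not upstream code): a custom Writer
+// can consult the configured level before doing expensive formatting.
+//
+//	if logger.Level() >= logger.LogDebug {
+//		logger.Instance.Writeln(logger.LogDebug, "verbose diagnostics enabled")
+//	}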
+func Level() LevelType { + return logLevel +} + +func init() { + // separated for testing purposes + initDefaultLogger() +} + +func initDefaultLogger() { + // init with nilLogger so callers don't have to do a nil check on Default + Instance = nilLogger{} + llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) + if llStr == "" { + return + } + var err error + logLevel, err = ParseLevel(llStr) + if err != nil { + fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) + return + } + if logLevel == LogNone { + return + } + // default to stderr + dest := os.Stderr + lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") + if strings.EqualFold(lfStr, "stdout") { + dest = os.Stdout + } else if lfStr != "" { + lf, err := os.Create(lfStr) + if err == nil { + dest = lf + } else { + fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) + } + } + Instance = fileLogger{ + logLevel: logLevel, + mu: &sync.Mutex{}, + logFile: dest, + } +} + +// the nil logger does nothing +type nilLogger struct{} + +func (nilLogger) Writeln(LevelType, string) {} + +func (nilLogger) Writef(LevelType, string, ...interface{}) {} + +func (nilLogger) WriteRequest(*http.Request, Filter) {} + +func (nilLogger) WriteResponse(*http.Response, Filter) {} + +// A File is used instead of a Logger so the stream can be flushed after every write. +type fileLogger struct { + logLevel LevelType + mu *sync.Mutex // for synchronizing writes to logFile + logFile *os.File +} + +func (fl fileLogger) Writeln(level LevelType, message string) { + fl.Writef(level, "%s\n", message) +} + +func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { + if fl.logLevel >= level { + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) + fl.logFile.Sync() + } +} + +func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { + if req == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) + // dump headers + for k, v := range req.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(req.Header, req.Body) { + // dump body + body, err := ioutil.ReadAll(req.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + if nc, ok := req.Body.(io.Seeker); ok { + // rewind to the beginning + nc.Seek(0, io.SeekStart) + } else { + // recreate the body + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + } + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { + if resp == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) + // dump headers + for k, v := range resp.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(resp.Header, resp.Body) { + // dump body + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + } else { + fmt.Fprintf(b, "failed to 
read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +// returns true if the provided body should be included in the log +func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { + ct := header.Get("Content-Type") + return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") +} + +// creates standard header for log entries, it contains a timestamp and the log level +func entryHeader(level LevelType) string { + // this format provides a fixed number of digits so the size of the timestamp is constant + return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) +} diff --git a/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/vendor/github.com/Azure/go-autorest/tracing/LICENSE new file mode 100644 index 000000000..b9d6a27ea --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go new file mode 100644 index 000000000..e163975cd --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package tracing + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go new file mode 100644 index 000000000..0e7a6e962 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go @@ -0,0 +1,67 @@ +package tracing + +// Copyright 2018 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" +) + +// Tracer represents an HTTP tracing facility. +type Tracer interface { + NewTransport(base *http.Transport) http.RoundTripper + StartSpan(ctx context.Context, name string) context.Context + EndSpan(ctx context.Context, httpStatusCode int, err error) +} + +var ( + tracer Tracer +) + +// Register will register the provided Tracer. Pass nil to unregister a Tracer. +func Register(t Tracer) { + tracer = t +} + +// IsEnabled returns true if a Tracer has been registered. +func IsEnabled() bool { + return tracer != nil +} + +// NewTransport creates a new instrumenting http.RoundTripper for the +// registered Tracer. If no Tracer has been registered it returns nil. 
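+//
+// Illustrative sketch (editor's addition; myTracer is a hypothetical Tracer
+// implementation, not part of this package): register a tracer, then wrap the
+// default transport only if a tracer was actually registered.
+//
+//	tracing.Register(myTracer{})
+//	client := &http.Client{}
+//	if rt := tracing.NewTransport(http.DefaultTransport.(*http.Transport)); rt != nil {
+//		client.Transport = rt
+//	}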
+func NewTransport(base *http.Transport) http.RoundTripper { + if tracer != nil { + return tracer.NewTransport(base) + } + return nil +} + +// StartSpan starts a trace span with the specified name, associating it with the +// provided context. Has no effect if a Tracer has not been registered. +func StartSpan(ctx context.Context, name string) context.Context { + if tracer != nil { + return tracer.StartSpan(ctx, name) + } + return ctx +} + +// EndSpan ends a previously started span stored in the context. +// Has no effect if a Tracer has not been registered. +func EndSpan(ctx context.Context, httpStatusCode int, err error) { + if tracer != nil { + tracer.EndSpan(ctx, httpStatusCode, err) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 000000000..899129ecc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go new file mode 100644 index 000000000..1c4967429 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go @@ -0,0 +1,93 @@ +// Package arn provides a parser for interacting with Amazon Resource Names. +package arn + +import ( + "errors" + "strings" +) + +const ( + arnDelimiter = ":" + arnSections = 6 + arnPrefix = "arn:" + + // zero-indexed + sectionPartition = 1 + sectionService = 2 + sectionRegion = 3 + sectionAccountID = 4 + sectionResource = 5 + + // errors + invalidPrefix = "arn: invalid prefix" + invalidSections = "arn: not enough sections" +) + +// ARN captures the individual fields of an Amazon Resource Name. +// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information. +type ARN struct { + // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in + // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China + // (Beijing) region is "aws-cn". + Partition string + + // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of + // namespaces, see + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces. + Service string + + // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this + // component might be omitted. + Region string + + // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the + // ARNs for some resources don't require an account number, so this component might be omitted. + AccountID string + + // The content of this part of the ARN varies by service. 
It often includes an indicator of the type of resource — + // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the + // resource name itself. Some services allows paths for resource names, as described in + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths. + Resource string +} + +// Parse parses an ARN into its constituent parts. +// +// Some example ARNs: +// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment +// arn:aws:iam::123456789012:user/David +// arn:aws:rds:eu-west-1:123456789012:db:mysql-db +// arn:aws:s3:::my_corporate_bucket/exampleobject.png +func Parse(arn string) (ARN, error) { + if !strings.HasPrefix(arn, arnPrefix) { + return ARN{}, errors.New(invalidPrefix) + } + sections := strings.SplitN(arn, arnDelimiter, arnSections) + if len(sections) != arnSections { + return ARN{}, errors.New(invalidSections) + } + return ARN{ + Partition: sections[sectionPartition], + Service: sections[sectionService], + Region: sections[sectionRegion], + AccountID: sections[sectionAccountID], + Resource: sections[sectionResource], + }, nil +} + +// IsARN returns whether the given string is an ARN by looking for +// whether the string starts with "arn:" and contains the correct number +// of sections delimited by colons(:). +func IsARN(arn string) bool { + return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1 +} + +// String returns the canonical representation of the ARN +func (arn ARN) String() string { + return arnPrefix + + arn.Partition + arnDelimiter + + arn.Service + arnDelimiter + + arn.Region + arnDelimiter + + arn.AccountID + arnDelimiter + + arn.Resource +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go new file mode 100644 index 000000000..dd950a286 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go @@ -0,0 +1,50 @@ +package bearer + +import ( + "github.com/aws/aws-sdk-go/aws" + "time" +) + +// Token provides a type wrapping a bearer token and expiration metadata. +type Token struct { + Value string + + CanExpire bool + Expires time.Time +} + +// Expired returns if the token's Expires time is before or equal to the time +// provided. If CanExpire is false, Expired will always return false. +func (t Token) Expired(now time.Time) bool { + if !t.CanExpire { + return false + } + now = now.Round(0) + return now.Equal(t.Expires) || now.After(t.Expires) +} + +// TokenProvider provides interface for retrieving bearer tokens. +type TokenProvider interface { + RetrieveBearerToken(aws.Context) (Token, error) +} + +// TokenProviderFunc provides a helper utility to wrap a function as a type +// that implements the TokenProvider interface. +type TokenProviderFunc func(aws.Context) (Token, error) + +// RetrieveBearerToken calls the wrapped function, returning the Token or +// error. +func (fn TokenProviderFunc) RetrieveBearerToken(ctx aws.Context) (Token, error) { + return fn(ctx) +} + +// StaticTokenProvider provides a utility for wrapping a static bearer token +// value within an implementation of a token provider. +type StaticTokenProvider struct { + Token Token +} + +// RetrieveBearerToken returns the static token specified. 
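+//
+// Minimal usage sketch (editor's illustration; the token value is a made-up
+// placeholder, not upstream code):
+//
+//	p := bearer.StaticTokenProvider{Token: bearer.Token{Value: "my-token"}}
+//	tok, _ := p.RetrieveBearerToken(aws.BackgroundContext())
+//	_ = tok.Value // "my-token"; CanExpire is false, so Expired always reports false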
+func (s StaticTokenProvider) RetrieveBearerToken(aws.Context) (Token, error) { + return s.Token, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 000000000..99849c0e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,164 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. +// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + var errs []error + if origErr != nil { + errs = append(errs, origErr) + } + return newBaseError(code, message, errs) +} + +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. +func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. 
+// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} + +// UnmarshalError provides the interface for the SDK failing to unmarshal data. +type UnmarshalError interface { + awsError + Bytes() []byte +} + +// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding +// the bytes that fail to unmarshal to the error. +func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { + return &unmarshalError{ + awsError: New("UnmarshalError", msg, err), + bytes: bytes, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 000000000..9cf7eaf40 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,221 @@ +package awserr + +import ( + "encoding/hex" + "fmt" +) + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. 
+ errs []error +} + +// newBaseError returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. +func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occurred", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string + bytes []byte +} + +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. 
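
To illustrate, a small sketch of building and inspecting these wrappers with the constructors above (the code, message, status code, and request ID are illustrative values):

    package main

    import (
        "errors"
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awserr"
    )

    func main() {
        // Wrap a lower-level failure with an SDK-style code and message.
        orig := errors.New("dial tcp: connection refused")
        err := awserr.New("RequestError", "send request failed", orig)

        fmt.Println(err.Code())    // "RequestError"
        fmt.Println(err.Message()) // "send request failed"
        fmt.Println(err.OrigErr()) // the wrapped original error

        // Attach HTTP metadata the way service responses do.
        reqErr := awserr.NewRequestFailure(err, 503, "EXAMPLE-REQ-ID")
        fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
    }
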
+func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + +type unmarshalError struct { + awsError + bytes []byte +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (e unmarshalError) Error() string { + extra := hex.Dump(e.bytes) + return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (e unmarshalError) String() string { + return e.Error() +} + +// Bytes returns the bytes that failed to unmarshal. +func (e unmarshalError) Bytes() []byte { + return e.bytes +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += e[i].Error() + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 000000000..1a3d106d5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,108 @@ +package awsutil + +import ( + "io" + "reflect" + "time" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. 
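
A short sketch of the deep-copy semantics rcopy implements below (the params type is hypothetical):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awsutil"
    )

    type params struct {
        Bucket *string
        Keys   []string
    }

    func main() {
        b := "my-bucket"
        src := &params{Bucket: &b, Keys: []string{"a", "b"}}

        var dst params
        awsutil.Copy(&dst, src)

        // The copy is deep: mutating src leaves dst untouched.
        *src.Bucket = "changed"
        src.Keys[0] = "changed"
        fmt.Println(*dst.Bucket, dst.Keys) // my-bucket [a b]
    }
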
+func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + if _, ok := src.Interface().(*time.Time); !ok { + dst.Set(reflect.New(e)) + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + // Sets time.Time's unexported values + dst.Set(tempValue) + } + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. + if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 000000000..142a7a01c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type they are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 000000000..a4eb6a7f4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,221 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. 
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.EqualFold(name, c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + rvals := rValuesAtPath(i, path, true, false, v == nil) + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 000000000..11d4240d6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,123 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
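
A usage sketch of ValuesAtPath and SetValueAtPath (the listing and object types are hypothetical; ValuesAtPath delegates path evaluation to go-jmespath as shown above):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awsutil"
    )

    type object struct {
        Key  *string
        Size *int64
    }

    type listing struct {
        Contents []*object
    }

    func main() {
        a, b := "a.txt", "b.txt"
        out := &listing{Contents: []*object{{Key: &a}, {Key: &b}}}

        // Collect every Contents[].Key with a JMESPath expression.
        keys, err := awsutil.ValuesAtPath(out, "Contents[].Key")
        fmt.Println(len(keys), err) // 2 <nil>

        // Write through a path, allocating the nil *int64 on the way.
        n := int64(42)
        awsutil.SetValueAtPath(out, "Contents[0].Size", &n)
        fmt.Println(*out.Contents[0].Size) // 42
    }
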
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		strtype := v.Type().String()
+		if strtype == "time.Time" {
+			fmt.Fprintf(buf, "%s", v.Interface())
+			break
+		} else if strings.HasPrefix(strtype, "io.") {
+			buf.WriteString("<buffer>")
+			break
+		}
+
+		buf.WriteString("{\n")
+
+		names := []string{}
+		for i := 0; i < v.Type().NumField(); i++ {
+			name := v.Type().Field(i).Name
+			f := v.Field(i)
+			if name[0:1] == strings.ToLower(name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+				continue // ignore unset fields
+			}
+			names = append(names, name)
+		}
+
+		for i, n := range names {
+			val := v.FieldByName(n)
+			ft, ok := v.Type().FieldByName(n)
+			if !ok {
+				panic(fmt.Sprintf("expected to find field %v on type %v, but was not found", n, v.Type()))
+			}
+
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(n + ": ")
+
+			if tag := ft.Tag.Get("sensitive"); tag == "true" {
+				buf.WriteString("<sensitive>")
+			} else {
+				prettify(val, indent+2, buf)
+			}
+
+			if i < len(names)-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		strtype := v.Type().String()
+		if strtype == "[]uint8" {
+			fmt.Fprintf(buf, "<binary> len %d", v.Len())
+			break
+		}
+
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			prettify(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			prettify(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		if !v.IsValid() {
+			fmt.Fprint(buf, "<invalid value>")
+			return
+		}
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		case io.ReadSeeker, io.Reader:
+			format = "buffer(%p)"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 000000000..3f7cffd95
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,90 @@
+package awsutil
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// StringValue returns the string representation of a value.
+//
+// Deprecated: Use Prettify instead.
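
A small sketch of Prettify's output, including how the `sensitive:"true"` struct tag above redacts a field (the creds type is hypothetical):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awsutil"
    )

    type creds struct {
        AccessKeyID *string
        SecretKey   *string `sensitive:"true"`
    }

    func main() {
        id, secret := "AKIDEXAMPLE", "do-not-print"
        fmt.Println(awsutil.Prettify(&creds{AccessKeyID: &id, SecretKey: &secret}))
        // {
        //   AccessKeyID: "AKIDEXAMPLE",
        //   SecretKey: <sensitive>
        // }
    }
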
+func StringValue(i interface{}) string {
+	var buf bytes.Buffer
+	stringValue(reflect.ValueOf(i), 0, &buf)
+	return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		buf.WriteString("{\n")
+
+		for i := 0; i < v.Type().NumField(); i++ {
+			ft := v.Type().Field(i)
+			fv := v.Field(i)
+
+			if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
+				continue // ignore unset fields
+			}
+
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(ft.Name + ": ")
+
+			if tag := ft.Tag.Get("sensitive"); tag == "true" {
+				buf.WriteString("<sensitive>")
+			} else {
+				stringValue(fv, indent+2, buf)
+			}
+
+			buf.WriteString(",\n")
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			stringValue(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			stringValue(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
new file mode 100644
index 000000000..b147f103c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -0,0 +1,94 @@
+package client
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+	Config         *aws.Config
+	Handlers       request.Handlers
+	PartitionID    string
+	Endpoint       string
+	SigningRegion  string
+	SigningName    string
+	ResolvedRegion string
+
+	// States that the signing name did not come from a modeled source but
+	// was derived based on other data. Used by service client constructors
+	// to determine if the signin name can be overridden based on metadata the
+	// service has.
+	SigningNameDerived bool
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+	ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
+// resolve the endpoint automatically. The service client's endpoint must be
+// provided via the aws.Config.Endpoint field.
+type ConfigNoResolveEndpointProvider interface {
+	ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. +func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers.Copy(), + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = DefaultRetryerMaxNumRetries + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. +func (c *Client) AddDebugHandlers() { + c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) + c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go new file mode 100644 index 000000000..9f6af19dd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -0,0 +1,177 @@ +package client + +import ( + "math" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkrand" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, you can implement the +// request.Retryer interface. +// +type DefaultRetryer struct { + // Num max Retries is the number of max retries that will be performed. + // By default, this is zero. + NumMaxRetries int + + // MinRetryDelay is the minimum retry delay after which retry will be performed. + // If not set, the value is 0ns. + MinRetryDelay time.Duration + + // MinThrottleRetryDelay is the minimum retry delay when throttled. + // If not set, the value is 0ns. + MinThrottleDelay time.Duration + + // MaxRetryDelay is the maximum retry delay before which retry must be performed. + // If not set, the value is 0ns. + MaxRetryDelay time.Duration + + // MaxThrottleDelay is the maximum retry delay when throttled. + // If not set, the value is 0ns. 
+	MaxThrottleDelay time.Duration
+}
+
+const (
+	// DefaultRetryerMaxNumRetries sets maximum number of retries
+	DefaultRetryerMaxNumRetries = 3
+
+	// DefaultRetryerMinRetryDelay sets minimum retry delay
+	DefaultRetryerMinRetryDelay = 30 * time.Millisecond
+
+	// DefaultRetryerMinThrottleDelay sets minimum delay when throttled
+	DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
+
+	// DefaultRetryerMaxRetryDelay sets maximum retry delay
+	DefaultRetryerMaxRetryDelay = 300 * time.Second
+
+	// DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
+	DefaultRetryerMaxThrottleDelay = 300 * time.Second
+)
+
+// MaxRetries returns the number of maximum returns the service will use to make
+// an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+	return d.NumMaxRetries
+}
+
+// setRetryerDefaults sets the default values of the retryer if not set
+func (d *DefaultRetryer) setRetryerDefaults() {
+	if d.MinRetryDelay == 0 {
+		d.MinRetryDelay = DefaultRetryerMinRetryDelay
+	}
+	if d.MaxRetryDelay == 0 {
+		d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
+	}
+	if d.MinThrottleDelay == 0 {
+		d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
+	}
+	if d.MaxThrottleDelay == 0 {
+		d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
+	}
+}
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+
+	// if number of max retries is zero, no retries will be performed.
+	if d.NumMaxRetries == 0 {
+		return 0
+	}
+
+	// Sets default value for retryer members
+	d.setRetryerDefaults()
+
+	// minDelay is the minimum retryer delay
+	minDelay := d.MinRetryDelay
+
+	var initialDelay time.Duration
+
+	isThrottle := r.IsErrorThrottle()
+	if isThrottle {
+		if delay, ok := getRetryAfterDelay(r); ok {
+			initialDelay = delay
+		}
+		minDelay = d.MinThrottleDelay
+	}
+
+	retryCount := r.RetryCount
+
+	// maxDelay the maximum retryer delay
+	maxDelay := d.MaxRetryDelay
+
+	if isThrottle {
+		maxDelay = d.MaxThrottleDelay
+	}
+
+	var delay time.Duration
+
+	// Logic to cap the retry count based on the minDelay provided
+	actualRetryCount := int(math.Log2(float64(minDelay))) + 1
+	if actualRetryCount < 63-retryCount {
+		delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
+		if delay > maxDelay {
+			delay = getJitterDelay(maxDelay / 2)
+		}
+	} else {
+		delay = getJitterDelay(maxDelay / 2)
+	}
+	return delay + initialDelay
+}
+
+// getJitterDelay returns a jittered delay for retry
+func getJitterDelay(duration time.Duration) time.Duration {
+	return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+
+	// ShouldRetry returns false if number of max retries is 0.
+ if d.NumMaxRetries == 0 { + return false + } + + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable != nil { + return *r.Retryable + } + return r.IsErrorRetryable() || r.IsErrorThrottle() +} + +// This will look in the Retry-After header, RFC 7231, for how long +// it will wait before attempting another request +func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { + if !canUseRetryAfterHeader(r) { + return 0, false + } + + delayStr := r.HTTPResponse.Header.Get("Retry-After") + if len(delayStr) == 0 { + return 0, false + } + + delay, err := strconv.Atoi(delayStr) + if err != nil { + return 0, false + } + + return time.Duration(delay) * time.Second, true +} + +// Will look at the status code to see if the retry header pertains to +// the status code. +func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go new file mode 100644 index 000000000..5ac5c24a1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -0,0 +1,206 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. + Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. + // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. + Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. +var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + +func logRequest(r *request.Request) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { + return + } + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } + // Reset the request body because dumpRequest will re-wrap the + // r.HTTPRequest's Body as a NoOpCloser and will not be reset after + // read by the HTTP client reader. 
+ if err := r.Error; err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. +var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { + return + } + + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. +var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + +func logResponse(r *request.Request) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { + return + } + + lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + + if r.HTTPResponse == nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) + return + } + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } + } + + handlerFn := func(req *request.Request) { + b, err := httputil.DumpResponse(req.HTTPResponse, false) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(string(b)) + } + } + + const handlerName = "awsdk.client.LogResponse.ResponseBody" + + r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) + r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) +} + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. 
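
Tying the pieces together, a sketch of plugging DefaultRetryer and the debug-logging handlers above into a client configuration; it assumes the SDK's session package is vendored alongside these files, and all values are illustrative:

    package main

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/client"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Fields left zero fall back to the DefaultRetryer* constants.
        retryer := client.DefaultRetryer{
            NumMaxRetries: 5,
            MinRetryDelay: 50 * time.Millisecond,
            MaxRetryDelay: 2 * time.Second,
        }

        // LogDebugWithHTTPBody makes the logging handlers dump full request
        // and response bodies; plain LogDebug logs without bodies.
        sess := session.Must(session.NewSession(&aws.Config{
            Retryer:  retryer,
            LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody),
        }))
        _ = sess
    }
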
+var LogHTTPResponseHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponseHeader", + Fn: logResponseHeader, +} + +func logResponseHeader(r *request.Request) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { + return + } + + b, err := httputil.DumpResponse(r.HTTPResponse, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 000000000..a7530ebb3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,15 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. +type ClientInfo struct { + ServiceName string + ServiceID string + APIVersion string + PartitionID string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string + ResolvedRegion string +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go new file mode 100644 index 000000000..881d575f0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go @@ -0,0 +1,28 @@ +package client + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// NoOpRetryer provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type NoOpRetryer struct{} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API; For NoOpRetryer the MaxRetries will always be zero. +func (d NoOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. +func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. +func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration { + return 0 +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go new file mode 100644 index 000000000..c483e0cb8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -0,0 +1,670 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// UseServiceDefaultRetries instructs the config to use the service's own +// default number of retries. This will be the default action if +// Config.MaxRetries is nil also. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer +// interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. By default, +// all clients will use the defaults.DefaultConfig structure. +// +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// })) +// +// // Create S3 service client with a specific Region. 
+// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to + // retrieve credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to a + // chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `nil` or the value to `""` to use the default generated endpoint. + // + // Note: You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The resolver to use for looking up endpoints for AWS service clients + // to use based on region. + EndpointResolver endpoints.Resolver + + // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call + // ShouldRetry regardless of whether or not if request.Retryable is set. + // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck + // is not set, then ShouldRetry will only be called if request.Retryable is nil. + // Proper handling of the request.Retryable field is important when setting this field. + EnforceShouldRetryCheck *bool + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS + // Regions and Endpoints. + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service + // specific configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of + // recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the client.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. + // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for + // missing required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. 
+ DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client + // will use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // Note: This configuration option is specific to the Amazon S3 service. + // + // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // for Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` + // header to PUT requests over 2MB of content. 100-Continue instructs the + // HTTP client not to send the body until the service responds with a + // `continue` status. This is useful to prevent sending the request body + // until after the request is authenticated, and validated. + // + // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + // + // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s + // `ExpectContinueTimeout` for information on adjusting the continue wait + // timeout. https://golang.org/pkg/net/http/#Transport + // + // You should use this flag to disable 100-Continue if you experience issues + // with proxies or third party S3 compatible services. + S3Disable100Continue *bool + + // Set this to `true` to enable S3 Accelerate feature. For all operations + // compatible with S3 Accelerate will use the accelerate endpoint for + // requests. Requests not compatible will fall back to normal S3 requests. + // + // The bucket must be enable for accelerate to be used with S3 client with + // accelerate enabled. If the bucket is not enabled for accelerate an error + // will be returned. The bucket name must be DNS compatible to also work + // with accelerate. + S3UseAccelerate *bool + + // S3DisableContentMD5Validation config option is temporarily disabled, + // For S3 GetObject API calls, #1837. + // + // Set this to `true` to disable the S3 service client from automatically + // adding the ContentMD5 to S3 Object Put and Upload API calls. This option + // will also disable the SDK from performing object ContentMD5 validation + // on GetObject API calls. + S3DisableContentMD5Validation *bool + + // Set this to `true` to have the S3 service client to use the region specified + // in the ARN, when an ARN is provided as an argument to a bucket parameter. + S3UseARNRegion *bool + + // Set this to `true` to enable the SDK to unmarshal API response header maps to + // normalized lower case map keys. + // + // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case + // Metadata member's map keys. The value of the header in the map is unaffected. + // + // The AWS SDK for Go v2, uses lower case header maps by default. The v1 + // SDK provides this opt-in for this option, for backwards compatibility. + LowerCaseHeaderMaps *bool + + // Set this to `true` to disable the EC2Metadata client from overriding the + // default http.Client's Timeout. This is helpful if you do not want the + // EC2Metadata client to create a new http.Client. This options is only + // meaningful if you're not already using a custom HTTP client with the + // SDK. Enabled by default. + // + // Must be set and provided to the session.NewSession() in order to disable + // the EC2Metadata overriding the timeout for default credentials chain. 
+ // + // Example: + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataDisableTimeoutOverride(true))) + // + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + // Set this to `false` to disable EC2Metadata client from falling back to IMDSv1. + // By default, EC2 role credentials will fall back to IMDSv1 as needed for backwards compatibility. + // You can disable this behavior by explicitly setting this flag to `false`. When false, the EC2Metadata + // client will return any errors encountered from attempting to fetch a token instead of silently + // using the insecure data flow of IMDSv1. + // + // Example: + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataEnableFallback(false))) + // + // svc := s3.New(sess) + // + // See [configuring IMDS] for more information. + // + // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html + EC2MetadataEnableFallback *bool + + // Instructs the endpoint to be generated for a service client to + // be the dual stack endpoint. The dual stack endpoint will support + // both IPv4 and IPv6 addressing. + // + // Setting this for a service which does not support dual stack will fail + // to make requests. It is not recommended to set this value on the session + // as it will apply to all service clients created with the session. Even + // services which don't support dual stack endpoints. + // + // If the Endpoint config value is also provided the UseDualStack flag + // will be ignored. + // + // Only supported with. + // + // sess := session.Must(session.NewSession()) + // + // svc := s3.New(sess, &aws.Config{ + // UseDualStack: aws.Bool(true), + // }) + // + // Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility. + // UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients + // moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher + // precedence then this option. + UseDualStack *bool + + // Sets the resolver to resolve a dual-stack endpoint for the service. + UseDualStackEndpoint endpoints.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. + UseFIPSEndpoint endpoints.FIPSEndpointState + + // SleepDelay is an override for the func the SDK will call when sleeping + // during the lifecycle of a request. Specifically this will be used for + // request delays. This value should only be used for testing. To adjust + // the delay of a request see the aws/client.DefaultRetryer and + // aws/request.Retryer. + // + // SleepDelay will prevent any Context from being used for canceling retry + // delay of an API operation. It is recommended to not use SleepDelay at all + // and specify a Retryer instead. + SleepDelay func(time.Duration) + + // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. + // Will default to false. This would only be used for empty directory names in s3 requests. 
+ // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // DisableRestProtocolURICleaning: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("//foo//bar//moo"), + // }) + DisableRestProtocolURICleaning *bool + + // EnableEndpointDiscovery will allow for endpoint discovery on operations that + // have the definition in its model. By default, endpoint discovery is off. + // To use EndpointDiscovery, Endpoint should be unset or set to an empty string. + // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // EnableEndpointDiscovery: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("/foo/bar/moo"), + // }) + EnableEndpointDiscovery *bool + + // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing + // request endpoint hosts with modeled information. + // + // Disabling this feature is useful when you want to use local endpoints + // for testing that do not support the modeled host prefix pattern. + DisableEndpointHostPrefix *bool + + // STSRegionalEndpoint will enable regional or legacy endpoint resolving + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint +} + +// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers. +// +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// )) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). +// WithRegion("us-west-2"), +// ) +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithEndpointResolver sets a config EndpointResolver value returning a +// Config pointer for chaining. +func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { + c.EndpointResolver = resolver + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. 
+func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c + +} + +// WithS3DisableContentMD5Validation sets a config +// S3DisableContentMD5Validation value returning a Config pointer for chaining. +func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { + c.S3DisableContentMD5Validation = &enable + return c + +} + +// WithS3UseARNRegion sets a config S3UseARNRegion value and +// returning a Config pointer for chaining +func (c *Config) WithS3UseARNRegion(enable bool) *Config { + c.S3UseARNRegion = &enable + return c +} + +// WithUseDualStack sets a config UseDualStack value returning a Config +// pointer for chaining. +func (c *Config) WithUseDualStack(enable bool) *Config { + c.UseDualStack = &enable + return c +} + +// WithUseFIPSEndpoint sets a config UseFIPSEndpoint value returning a Config +// pointer for chaining. +func (c *Config) WithUseFIPSEndpoint(enable bool) *Config { + if enable { + c.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + } else { + c.UseFIPSEndpoint = endpoints.FIPSEndpointStateDisabled + } + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithEC2MetadataEnableFallback sets a config EC2MetadataEnableFallback value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataEnableFallback(v bool) *Config { + c.EC2MetadataEnableFallback = &v + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. 
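
A brief sketch of chaining the With* setters above instead of filling pointer fields by hand (values are illustrative):

    package main

    import (
        "net/http"
        "time"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        // Each setter stores the value and returns the same *Config,
        // so the calls compose into one expression.
        cfg := aws.NewConfig().
            WithRegion("us-west-2").
            WithMaxRetries(3).
            WithHTTPClient(&http.Client{Timeout: 30 * time.Second}).
            WithSleepDelay(time.Sleep)
        _ = cfg
    }
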
+func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// WithEndpointDiscovery will set whether or not to use endpoint discovery. +func (c *Config) WithEndpointDiscovery(t bool) *Config { + c.EnableEndpointDiscovery = &t + return c +} + +// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix +// when making requests. +func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { + c.DisableEndpointHostPrefix = &t + return c +} + +// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { + c.STSRegionalEndpoint = sre + return c +} + +// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config { + c.S3UsEast1RegionalEndpoint = sre + return c +} + +// WithLowerCaseHeaderMaps sets a config LowerCaseHeaderMaps value +// returning a Config pointer for chaining. +func (c *Config) WithLowerCaseHeaderMaps(t bool) *Config { + c.LowerCaseHeaderMaps = &t + return c +} + +// WithDisableRestProtocolURICleaning sets a config DisableRestProtocolURICleaning value +// returning a Config pointer for chaining. +func (c *Config) WithDisableRestProtocolURICleaning(t bool) *Config { + c.DisableRestProtocolURICleaning = &t + return c +} + +// MergeIn merges the passed in configs into the existing config object. +func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.EndpointResolver != nil { + dst.EndpointResolver = other.EndpointResolver + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.S3DisableContentMD5Validation != nil { + dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation + } + + if other.S3UseARNRegion != nil { + dst.S3UseARNRegion = other.S3UseARNRegion + } + + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + + if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset { + dst.UseDualStackEndpoint = 
other.UseDualStackEndpoint
+	}
+
+	if other.EC2MetadataDisableTimeoutOverride != nil {
+		dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
+	}
+
+	if other.EC2MetadataEnableFallback != nil {
+		dst.EC2MetadataEnableFallback = other.EC2MetadataEnableFallback
+	}
+
+	if other.SleepDelay != nil {
+		dst.SleepDelay = other.SleepDelay
+	}
+
+	if other.DisableRestProtocolURICleaning != nil {
+		dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
+	}
+
+	if other.EnforceShouldRetryCheck != nil {
+		dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
+	}
+
+	if other.EnableEndpointDiscovery != nil {
+		dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
+	}
+
+	if other.DisableEndpointHostPrefix != nil {
+		dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
+	}
+
+	if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint {
+		dst.STSRegionalEndpoint = other.STSRegionalEndpoint
+	}
+
+	if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint {
+		dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint
+	}
+
+	if other.LowerCaseHeaderMaps != nil {
+		dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps
+	}
+
+	if other.UseFIPSEndpoint != endpoints.FIPSEndpointStateUnset {
+		dst.UseFIPSEndpoint = other.UseFIPSEndpoint
+	}
+}
+
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c *Config) Copy(cfgs ...*Config) *Config {
+	dst := &Config{}
+	dst.MergeIn(c)
+
+	for _, cfg := range cfgs {
+		dst.MergeIn(cfg)
+	}
+
+	return dst
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
new file mode 100644
index 000000000..89aad2c67
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
@@ -0,0 +1,38 @@
+//go:build !go1.9
+// +build !go1.9
+
+package aws
+
+import "time"
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as an SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled. Deadline returns ok==false when no deadline is
+	// set. Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled. Done may return nil if this context can
+	// never be canceled. Successive calls to Done return the same value.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed. Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed. No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key returns the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	Value(key interface{}) interface{}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
new file mode 100644
index 000000000..6ee9ddd18
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
@@ -0,0 +1,12 @@
+//go:build go1.9
+// +build go1.9
+
+package aws
+
+import "context"
+
+// Context is an alias of the Go stdlib's context.Context interface.
+// It can be used within the SDK's API operation "WithContext" methods.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context = context.Context
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
new file mode 100644
index 000000000..313218190
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
@@ -0,0 +1,23 @@
+//go:build !go1.7
+// +build !go1.7
+
+package aws
+
+import (
+	"github.com/aws/aws-sdk-go/internal/context"
+)
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+	return context.BackgroundCtx
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
new file mode 100644
index 000000000..9975d561b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
@@ -0,0 +1,21 @@
+//go:build go1.7
+// +build go1.7
+
+package aws
+
+import "context"
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+	return context.Background()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
new file mode 100644
index 000000000..304fd1561
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
@@ -0,0 +1,24 @@
+package aws
+
+import (
+	"time"
+)
+
+// SleepWithContext will wait for the timer duration to expire, or for the
+// context to be canceled, whichever happens first. If the context is
+// canceled, the Context's error will be returned.
+//
+// Expects Context to always return a non-nil error if the Done channel is closed.
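+//
+// An illustrative sketch, not part of the original documentation:
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+//	defer cancel()
+//	// Returns ctx.Err() if the context expires before the full sleep.
+//	err := aws.SleepWithContext(ctx, 5*time.Second)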
+func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 000000000..4e076c183 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,918 @@ +package aws + +import "time" + +// String returns a pointer to the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. 
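+//
+// An illustrative sketch, not part of the original documentation:
+//	n := aws.Int(42)      // *int pointing at 42
+//	_ = aws.IntValue(n)   // 42
+//	_ = aws.IntValue(nil) // 0, nil-safe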
+func IntValue(v *int) int {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+	dst := make([]*int, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+	dst := make([]int, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+	dst := make(map[string]*int)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+	dst := make(map[string]int)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Uint returns a pointer to the uint value passed in.
+func Uint(v uint) *uint {
+	return &v
+}
+
+// UintValue returns the value of the uint pointer passed in or
+// 0 if the pointer is nil.
+func UintValue(v *uint) uint {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// UintSlice converts a slice of uint values into a slice of
+// uint pointers
+func UintSlice(src []uint) []*uint {
+	dst := make([]*uint, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// UintValueSlice converts a slice of uint pointers into a slice of
+// uint values
+func UintValueSlice(src []*uint) []uint {
+	dst := make([]uint, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// UintMap converts a string map of uint values into a string
+// map of uint pointers
+func UintMap(src map[string]uint) map[string]*uint {
+	dst := make(map[string]*uint)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// UintValueMap converts a string map of uint pointers into a string
+// map of uint values
+func UintValueMap(src map[string]*uint) map[string]uint {
+	dst := make(map[string]uint)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int8 returns a pointer to the int8 value passed in.
+func Int8(v int8) *int8 {
+	return &v
+}
+
+// Int8Value returns the value of the int8 pointer passed in or
+// 0 if the pointer is nil.
+func Int8Value(v *int8) int8 { + if v != nil { + return *v + } + return 0 +} + +// Int8Slice converts a slice of int8 values into a slice of +// int8 pointers +func Int8Slice(src []int8) []*int8 { + dst := make([]*int8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int8ValueSlice converts a slice of int8 pointers into a slice of +// int8 values +func Int8ValueSlice(src []*int8) []int8 { + dst := make([]int8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int8Map converts a string map of int8 values into a string +// map of int8 pointers +func Int8Map(src map[string]int8) map[string]*int8 { + dst := make(map[string]*int8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int8ValueMap converts a string map of int8 pointers into a string +// map of int8 values +func Int8ValueMap(src map[string]*int8) map[string]int8 { + dst := make(map[string]int8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int16 returns a pointer to the int16 value passed in. +func Int16(v int16) *int16 { + return &v +} + +// Int16Value returns the value of the int16 pointer passed in or +// 0 if the pointer is nil. +func Int16Value(v *int16) int16 { + if v != nil { + return *v + } + return 0 +} + +// Int16Slice converts a slice of int16 values into a slice of +// int16 pointers +func Int16Slice(src []int16) []*int16 { + dst := make([]*int16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int16ValueSlice converts a slice of int16 pointers into a slice of +// int16 values +func Int16ValueSlice(src []*int16) []int16 { + dst := make([]int16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int16Map converts a string map of int16 values into a string +// map of int16 pointers +func Int16Map(src map[string]int16) map[string]*int16 { + dst := make(map[string]*int16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int16ValueMap converts a string map of int16 pointers into a string +// map of int16 values +func Int16ValueMap(src map[string]*int16) map[string]int16 { + dst := make(map[string]int16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int32 pointer passed in or +// 0 if the pointer is nil. 
+func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int32 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint8 returns a pointer to the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Value returns the value of the uint8 pointer passed in or +// 0 if the pointer is nil. 
+func Uint8Value(v *uint8) uint8 { + if v != nil { + return *v + } + return 0 +} + +// Uint8Slice converts a slice of uint8 values into a slice of +// uint8 pointers +func Uint8Slice(src []uint8) []*uint8 { + dst := make([]*uint8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint8ValueSlice converts a slice of uint8 pointers into a slice of +// uint8 values +func Uint8ValueSlice(src []*uint8) []uint8 { + dst := make([]uint8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint8Map converts a string map of uint8 values into a string +// map of uint8 pointers +func Uint8Map(src map[string]uint8) map[string]*uint8 { + dst := make(map[string]*uint8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint8ValueMap converts a string map of uint8 pointers into a string +// map of uint8 values +func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { + dst := make(map[string]uint8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint16 returns a pointer to the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Value returns the value of the uint16 pointer passed in or +// 0 if the pointer is nil. +func Uint16Value(v *uint16) uint16 { + if v != nil { + return *v + } + return 0 +} + +// Uint16Slice converts a slice of uint16 values into a slice of +// uint16 pointers +func Uint16Slice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of +// uint16 values +func Uint16ValueSlice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint16Map converts a string map of uint16 values into a string +// map of uint16 pointers +func Uint16Map(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint16ValueMap converts a string map of uint16 pointers into a string +// map of uint16 values +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pointer to the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint32 pointer passed in or +// 0 if the pointer is nil. 
+func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint32 values into a slice of +// uint32 pointers +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values into a string +// map of uint32 pointers +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pointers into a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pointer to the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pointer passed in or +// 0 if the pointer is nil. +func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values into a slice of +// uint64 pointers +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values into a string +// map of uint64 pointers +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pointers into a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float32 returns a pointer to the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the float32 pointer passed in or +// 0 if the pointer is nil. 
+func Float32Value(v *float32) float32 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float32Slice converts a slice of float32 values into a slice of
+// float32 pointers
+func Float32Slice(src []float32) []*float32 {
+	dst := make([]*float32, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float32ValueSlice converts a slice of float32 pointers into a slice of
+// float32 values
+func Float32ValueSlice(src []*float32) []float32 {
+	dst := make([]float32, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float32Map converts a string map of float32 values into a string
+// map of float32 pointers
+func Float32Map(src map[string]float32) map[string]*float32 {
+	dst := make(map[string]*float32)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float32ValueMap converts a string map of float32 pointers into a string
+// map of float32 values
+func Float32ValueMap(src map[string]*float32) map[string]float32 {
+	dst := make(map[string]float32)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+	dst := make([]*float64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+	dst := make([]float64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+	dst := make(map[string]*float64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+	dst := make(map[string]float64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+	if v != nil {
+		return *v
+	}
+	return time.Time{}
+}
+
+// SecondsTimeValue converts an int64 pointer to a time.Time value
+// representing seconds since Epoch or time.Time{} if the pointer is nil.
+// Note that the input is divided by 1000 before being interpreted as
+// seconds.
+func SecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix((*v / 1000), 0)
+	}
+	return time.Time{}
+}
+
+// MillisecondsTimeValue converts an int64 pointer to a time.Time value
+// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
+func MillisecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix(0, (*v * 1000000))
+	}
+	return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64;
+// this includes calling TimeUnixMilli on a zero Time.
+//
+// This utility is useful for service APIs such as CloudWatch Logs which require
+// their unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
+func TimeUnixMilli(t time.Time) int64 {
+	return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+	dst := make([]*time.Time, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+	dst := make([]time.Time, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+	dst := make(map[string]*time.Time)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+	dst := make(map[string]time.Time)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go
new file mode 100644
index 000000000..140242dd1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go
@@ -0,0 +1,4 @@
+// DO NOT EDIT
+package corehandlers
+
+const isAwsInternal = ""
\ No newline at end of file
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
new file mode 100644
index 000000000..36a915efe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -0,0 +1,232 @@
+package corehandlers
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// Interface for matching types which also have a Len method.
+type lener interface {
+	Len() int
+}
+
+// BuildContentLengthHandler builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
+// to determine request body length and no "Content-Length" was specified it will panic.
+//
+// The Content-Length will only be added to the request if the length of the body
+// is greater than 0. If the body is empty or the current `Content-Length`
+// header is <= 0, the header will also be stripped.
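+//
+// Handlers like this one live on a request.HandlerList. An illustrative
+// sketch of re-registering it on a service client, not part of the original
+// documentation (svc is assumed to be any client, e.g. s3.New(sess)):
+//	svc.Handlers.Build.Remove(corehandlers.BuildContentLengthHandler)
+//	svc.Handlers.Build.PushBackNamed(corehandlers.BuildContentLengthHandler)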
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
+	var length int64
+
+	if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
+		length, _ = strconv.ParseInt(slength, 10, 64)
+	} else {
+		if r.Body != nil {
+			var err error
+			length, err = aws.SeekerLen(r.Body)
+			if err != nil {
+				r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
+				return
+			}
+		}
+	}
+
+	if length > 0 {
+		r.HTTPRequest.ContentLength = length
+		r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
+	} else {
+		r.HTTPRequest.ContentLength = 0
+		r.HTTPRequest.Header.Del("Content-Length")
+	}
+}}

+var reStatusCode = regexp.MustCompile(`^(\d{3})`)
+
+// ValidateReqSigHandler is a request handler to ensure that the request's
+// signature doesn't expire before it is sent. This can happen when a request
+// is built and signed significantly before it is sent, or when significant
+// delays occur while retrying requests, causing the signature to expire.
+var ValidateReqSigHandler = request.NamedHandler{
+	Name: "core.ValidateReqSigHandler",
+	Fn: func(r *request.Request) {
+		// Anonymous credentials produce unsigned requests, so there is
+		// nothing to re-sign.
+		if r.Config.Credentials == credentials.AnonymousCredentials {
+			return
+		}
+
+		signedTime := r.Time
+		if !r.LastSignedAt.IsZero() {
+			signedTime = r.LastSignedAt
+		}
+
+		// 5 minutes to allow for some clock skew/delays in transmission.
+		// Would be improved with aws/aws-sdk-go#423
+		if signedTime.Add(5 * time.Minute).After(time.Now()) {
+			return
+		}
+
+		fmt.Println("request expired, resigning")
+		r.Sign()
+	},
+}
+
+// SendHandler is a request handler to send a service request using the HTTP client.
+var SendHandler = request.NamedHandler{
+	Name: "core.SendHandler",
+	Fn: func(r *request.Request) {
+		sender := sendFollowRedirects
+		if r.DisableFollowRedirects {
+			sender = sendWithoutFollowRedirects
+		}
+
+		if request.NoBody == r.HTTPRequest.Body {
+			// Strip off the request body if the NoBody reader was used as a
+			// placeholder for a request body. This prevents the SDK from
+			// making requests with a request body when it would be invalid
+			// to do so.
+			//
+			// Use a shallow copy of the http.Request to ensure the race condition
+			// of transport on Body will not trigger
+			reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest
+			reqCopy.Body = nil
+			r.HTTPRequest = &reqCopy
+			defer func() {
+				r.HTTPRequest = reqOrig
+			}()
+		}
+
+		var err error
+		r.HTTPResponse, err = sender(r)
+		if err != nil {
+			handleSendError(r, err)
+		}
+	},
+}
+
+func sendFollowRedirects(r *request.Request) (*http.Response, error) {
+	return r.Config.HTTPClient.Do(r.HTTPRequest)
+}
+
+func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
+	transport := r.Config.HTTPClient.Transport
+	if transport == nil {
+		transport = http.DefaultTransport
+	}
+
+	return transport.RoundTrip(r.HTTPRequest)
+}
+
+func handleSendError(r *request.Request, err error) {
+	// Prevent leaking if an HTTPResponse was returned. Clean up
+	// the body.
+	if r.HTTPResponse != nil {
+		r.HTTPResponse.Body.Close()
+	}
+	// Capture the case where url.Error is returned while processing the
+	// error response, e.g. a 301 without a Location header comes back as a
+	// string error and r.HTTPResponse is nil. Other URL redirect errors
+	// come back in a similar manner.
+ if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all request errors, and let the default retrier determine + // if the error is retryable. + r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err) + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", r.Error) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{ + Name: "core.AfterRetryHandler", + Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } + }} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + // Was any endpoint provided by the user, or one was derived by the + // SDK's endpoint resolver? 
+ r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 000000000..7d50b1557 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,17 @@ +package corehandlers + +import "github.com/aws/aws-sdk-go/aws/request" + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if !r.ParamsFilled() { + return + } + + if v, ok := r.Params.(request.Validator); ok { + if err := v.Validate(); err != nil { + r.Error = err + } + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go new file mode 100644 index 000000000..ac842c55d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -0,0 +1,47 @@ +package corehandlers + +import ( + "os" + "runtime" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version +// to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +const execEnvVar = `AWS_EXECUTION_ENV` +const execEnvUAKey = `exec-env` + +// AddHostExecEnvUserAgentHander is a request handler appending the SDK's +// execution environment to the user agent. +// +// If the environment variable AWS_EXECUTION_ENV is set, its value will be +// appended to the user agent string. +var AddHostExecEnvUserAgentHander = request.NamedHandler{ + Name: "core.AddHostExecEnvUserAgentHander", + Fn: func(r *request.Request) { + v := os.Getenv(execEnvVar) + if len(v) == 0 { + return + } + + request.AddToUserAgent(r, execEnvUAKey+"/"+v) + }, +} + +var AddAwsInternal = request.NamedHandler{ + Name: "core.AddAwsInternal", + Fn: func(r *request.Request) { + if len(isAwsInternal) == 0 { + return + } + request.AddToUserAgent(r, isAwsInternal) + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go new file mode 100644 index 000000000..3ad1e798d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -0,0 +1,100 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // This has been deprecated. For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true. + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. + For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. 
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value, ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain.
+//
+//	creds := credentials.NewChainCredentials(
+//		[]credentials.Provider{
+//			&credentials.EnvProvider{},
+//			&ec2rolecreds.EC2RoleProvider{
+//				Client: ec2metadata.New(sess),
+//			},
+//		})
+//
+//	// Usage of ChainCredentials with aws.Config
+//	svc := ec2.New(session.Must(session.NewSession(&aws.Config{
+//		Credentials: creds,
+//	})))
+//
+type ChainProvider struct {
+	Providers     []Provider
+	curr          Provider
+	VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+	return NewCredentials(&ChainProvider{
+		Providers: append([]Provider{}, providers...),
+	})
+}
+
+// Retrieve returns the credentials value, or an error if no provider
+// returned a value without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+	var errs []error
+	for _, p := range c.Providers {
+		creds, err := p.Retrieve()
+		if err == nil {
+			c.curr = p
+			return creds, nil
+		}
+		errs = append(errs, err)
+	}
+	c.curr = nil
+
+	err := ErrNoValidProvidersFoundInChain
+	if c.VerboseErrors {
+		err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+	}
+	return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+	if c.curr != nil {
+		return c.curr.IsExpired()
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
new file mode 100644
index 000000000..6e3406b1f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
@@ -0,0 +1,23 @@
+//go:build !go1.7
+// +build !go1.7
+
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/internal/context"
+)
+
+// backgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func backgroundContext() Context {
+	return context.BackgroundCtx
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go
new file mode 100644
index 000000000..a68df0ee7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go
@@ -0,0 +1,21 @@
+//go:build go1.7
+// +build go1.7
+
+package credentials
+
+import "context"
+
+// backgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func backgroundContext() Context {
+	return context.Background()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go
new file mode 100644
index 000000000..0345fab2d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go
@@ -0,0 +1,40 @@
+//go:build !go1.9
+// +build !go1.9
+
+package credentials
+
+import "time"
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as an SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// This type, aws.Context, and context.Context are equivalent.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled. Deadline returns ok==false when no deadline is
+	// set. Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled. Done may return nil if this context can
+	// never be canceled. Successive calls to Done return the same value.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed. Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed. No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key returns the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	Value(key interface{}) interface{}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go
new file mode 100644
index 000000000..79018aba7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go
@@ -0,0 +1,14 @@
+//go:build go1.9
+// +build go1.9
+
+package credentials
+
+import "context"
+
+// Context is an alias of the Go stdlib's context.Context interface.
+// It can be used within the SDK's API operation "WithContext" methods.
+//
+// This type, aws.Context, and context.Context are equivalent.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context = context.Context
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 000000000..a880a3de8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,383 @@
+// Package credentials provides credential retrieval and management
+//
+// The Credentials is the primary method of getting access to and managing
+// credentials Values. Using dependency injection, retrieval of the credential
+// values is handled by an object which satisfies the Provider interface.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true. At which
+// point Credentials will call Provider's Retrieve() to get new credential Value.
+//
+// The Provider is responsible for determining when credentials Value have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+//	creds := credentials.NewEnvCredentials()
+//
+//	// Retrieve the credentials value
+//	credValue, err := creds.Get()
+//	if err != nil {
+//		// handle error
+//	}
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+//	creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
+//	creds.Expire()
+//	credsValue, err := creds.Get()
+//	// New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer set up with the provider. To use a custom Provider, just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+//	type MyProvider struct{}
+//	func (m *MyProvider) Retrieve() (Value, error) {...}
+//	func (m *MyProvider) IsExpired() bool {...}
+//
+//	creds := credentials.NewCredentials(&MyProvider{})
+//	credValue, err := creds.Get()
+//
package credentials

+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/internal/sync/singleflight"
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+//	svc := s3.New(session.Must(session.NewSession(&aws.Config{
+//		Credentials: credentials.AnonymousCredentials,
+//	})))
+//	// Access public S3 buckets.
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+	// AWS Access key ID
+	AccessKeyID string
+
+	// AWS Secret Access Key
+	SecretAccessKey string
+
+	// AWS Session Token
+	SessionToken string
+
+	// Provider used to get credentials
+	ProviderName string
+}
+
+// HasKeys returns if the credentials Value has both AccessKeyID and
+// SecretAccessKey value set.
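+//
+// An illustrative sketch, not part of the original documentation (creds is
+// assumed to be any *Credentials):
+//	v, err := creds.Get()
+//	if err == nil && v.HasKeys() {
+//		// v can be used to sign requests
+//	}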
+func (v Value) HasKeys() bool {
+	return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A Provider is required to manage its own Expired state and what it
+// means to be expired.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+	// Retrieve returns nil if it successfully retrieved the value.
+	// An error is returned if the value was not obtainable, or was empty.
+	Retrieve() (Value, error)
+
+	// IsExpired returns if the credentials are no longer valid, and need
+	// to be retrieved.
+	IsExpired() bool
+}
+
+// ProviderWithContext is a Provider that can retrieve credentials with a Context.
+type ProviderWithContext interface {
+	Provider
+
+	RetrieveWithContext(Context) (Value, error)
+}
+
+// An Expirer is an interface that Providers can implement to expose the expiration
+// time, if known. If the Provider cannot accurately provide this info,
+// it should not implement this interface.
+type Expirer interface {
+	// The time at which the credentials are no longer valid
+	ExpiresAt() time.Time
+}
+
+// An ErrorProvider is a stub credentials provider that always returns an
+// error. It is used by the SDK when constructing a known provider is not
+// possible due to an error.
+type ErrorProvider struct {
+	// The error to be returned from Retrieve
+	Err error
+
+	// The provider name to set on the returned Value
+	ProviderName string
+}
+
+// Retrieve will always return the error that the ErrorProvider was created with.
+func (p ErrorProvider) Retrieve() (Value, error) {
+	return Value{ProviderName: p.ProviderName}, p.Err
+}
+
+// IsExpired will always return not expired.
+func (p ErrorProvider) IsExpired() bool {
+	return false
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best way to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+//	type EC2RoleProvider struct {
+//		Expiry
+//		...
+//	}
+type Expiry struct {
+	// The date/time when the credentials expire
+	expiration time.Time
+
+	// If set, it will be used by IsExpired to determine the current time.
+	// Defaults to time.Now if CurrentTime is not set. Available for testing
+	// to be able to mock out the current time.
+	CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+	// Passed in expirations should have the monotonic clock values stripped.
+	// This ensures time comparisons will be based on wall-time.
+	e.expiration = expiration.Round(0)
+	if window > 0 {
+		e.expiration = e.expiration.Add(-window)
+	}
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+	curTime := e.CurrentTime
+	if curTime == nil {
+		curTime = time.Now
+	}
+	return e.expiration.Before(curTime())
+}
+
+// ExpiresAt returns the expiration time of the credentials.
+func (e *Expiry) ExpiresAt() time.Time {
+	return e.expiration
+}
+
+// A Credentials provides concurrency safe retrieval of AWS credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + sf singleflight.Group + + m sync.RWMutex + creds Value + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + c := &Credentials{ + provider: provider, + } + return c +} + +// GetWithContext returns the credentials value, or error if the credentials +// Value failed to be retrieved. Will return early if the passed in context is +// canceled. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +// +// Passed in Context is equivalent to aws.Context, and context.Context. +func (c *Credentials) GetWithContext(ctx Context) (Value, error) { + // Check if credentials are cached, and not expired. + select { + case curCreds, ok := <-c.asyncIsExpired(): + // ok will only be true, of the credentials were not expired. ok will + // be false and have no value if the credentials are expired. + if ok { + return curCreds, nil + } + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) + } + + // Cannot pass context down to the actual retrieve, because the first + // context would cancel the whole group when there is not direct + // association of items in the group. + resCh := c.sf.DoChan("", func() (interface{}, error) { + return c.singleRetrieve(&suppressedContext{ctx}) + }) + select { + case res := <-resCh: + return res.Val.(Value), res.Err + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) + } +} + +func (c *Credentials) singleRetrieve(ctx Context) (interface{}, error) { + c.m.Lock() + defer c.m.Unlock() + + if curCreds := c.creds; !c.isExpiredLocked(curCreds) { + return curCreds, nil + } + + var creds Value + var err error + if p, ok := c.provider.(ProviderWithContext); ok { + creds, err = p.RetrieveWithContext(ctx) + } else { + creds, err = c.provider.Retrieve() + } + if err == nil { + c.creds = creds + } + + return creds, err +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + return c.GetWithContext(backgroundContext()) +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). 
+// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.creds = Value{} +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.RLock() + defer c.m.RUnlock() + + return c.isExpiredLocked(c.creds) +} + +// asyncIsExpired returns a channel of credentials Value. If the channel is +// closed the credentials are expired and credentials value are not empty. +func (c *Credentials) asyncIsExpired() <-chan Value { + ch := make(chan Value, 1) + go func() { + c.m.RLock() + defer c.m.RUnlock() + + if curCreds := c.creds; !c.isExpiredLocked(curCreds) { + ch <- curCreds + } + + close(ch) + }() + + return ch +} + +// isExpiredLocked helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpiredLocked(creds interface{}) bool { + return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired() +} + +// ExpiresAt provides access to the functionality of the Expirer interface of +// the underlying Provider, if it supports that interface. Otherwise, it returns +// an error. +func (c *Credentials) ExpiresAt() (time.Time, error) { + c.m.RLock() + defer c.m.RUnlock() + + expirer, ok := c.provider.(Expirer) + if !ok { + return time.Time{}, awserr.New("ProviderNotExpirer", + fmt.Sprintf("provider %s does not support ExpiresAt()", + c.creds.ProviderName), + nil) + } + if c.creds == (Value{}) { + // set expiration time to the distant past + return time.Time{}, nil + } + return expirer.ExpiresAt(), nil +} + +type suppressedContext struct { + Context +} + +func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { + return time.Time{}, false +} + +func (s *suppressedContext) Done() <-chan struct{} { + return nil +} + +func (s *suppressedContext) Err() error { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 000000000..92af5b725 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,188 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. 
If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. +func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { + return m.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + credsList, err := requestCredList(ctx, m.Client) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + if len(credsList) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(ctx, m.Client, credsName) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + ProviderName: ProviderName, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshaling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "iam/security-credentials/" + +// requestCredList requests a list of credentials from the EC2 service. 
+// If there are no credentials, or there is an error making or receiving the request +func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath) + if err != nil { + return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) + } + + credsList := []string{} + s := bufio.NewScanner(strings.NewReader(resp)) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, awserr.New(request.ErrCodeSerialization, + "failed to read EC2 instance role from metadata service", err) + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. +// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. +func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New(request.ErrCodeSerialization, + fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go new file mode 100644 index 000000000..329f788a3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -0,0 +1,255 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. The format of the refreshable credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// { +// "code": "ErrorCode", +// "message": "Helpful error message." 
+// } +package endpointcreds + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" +) + +// ProviderName is the name of the credentials provider. +const ProviderName = `CredentialsEndpointProvider` + +// Provider satisfies the credentials.Provider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + staticCreds bool + credentials.Expiry + + // Requires a AWS Client to make HTTP requests to the endpoint with. + // the Endpoint the request will be made to is provided by the aws.Config's + // Endpoint value. + Client *client.Client + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // Optional authorization token value if set will be used as the value of + // the Authorization header of the endpoint credential request. + // + // When constructed from environment, the provider will use the value of + // AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token + // + // Will be overridden if AuthorizationTokenProvider is configured + AuthorizationToken string + + // Optional auth provider func to dynamically load the auth token from a file + // everytime a credential is retrieved + // + // When constructed from environment, the provider will read and use the content + // of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable + // as the auth token everytime credentials are retrieved + // + // Will override AuthorizationToken if configured + AuthorizationTokenProvider AuthTokenProvider +} + +// AuthTokenProvider defines an interface to dynamically load a value to be passed +// for the Authorization header of a credentials request. +type AuthTokenProvider interface { + GetToken() (string, error) +} + +// TokenProviderFunc is a func type implementing AuthTokenProvider interface +// and enables customizing token provider behavior +type TokenProviderFunc func() (string, error) + +// GetToken func retrieves auth token according to TokenProviderFunc implementation +func (p TokenProviderFunc) GetToken() (string, error) { + return p() +} + +// NewProviderClient returns a credentials Provider for retrieving AWS credentials +// from arbitrary endpoint. 
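+//
+// A minimal wiring sketch; the session value sess, the endpoint URL, and the
+// token value are assumed for illustration only:
+//
+//	provider := endpointcreds.NewProviderClient(*sess.Config, sess.Handlers,
+//		"http://127.0.0.1/creds",
+//		func(p *endpointcreds.Provider) { p.AuthorizationToken = "my-token" },
+//	)
+//	creds := credentials.NewCredentials(provider)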
+func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { + p := &Provider{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "CredentialsEndpoint", + Endpoint: endpoint, + }, + handlers, + ), + } + + p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) + p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) + p.Client.Handlers.Validate.Clear() + p.Client.Handlers.Validate.PushBack(validateEndpointHandler) + + for _, option := range options { + option(p) + } + + return p +} + +// NewCredentialsClient returns a pointer to a new Credentials object +// wrapping the endpoint credentials Provider. +func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { + return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *Provider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// Retrieve will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. +func (p *Provider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. +func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + resp, err := p.getCredentials(ctx) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("CredentialsEndpointError", "failed to load credentials", err) + } + + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } else { + p.staticCreds = true + } + + return credentials.Value{ + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.Token, + ProviderName: ProviderName, + }, nil +} + +type getCredentialsOutput struct { + Expiration *time.Time + AccessKeyID string + SecretAccessKey string + Token string +} + +type errorOutput struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) { + op := &request.Operation{ + Name: "GetCredentials", + HTTPMethod: "GET", + } + + out := &getCredentialsOutput{} + req := p.Client.NewRequest(op, nil, out) + req.SetContext(ctx) + req.HTTPRequest.Header.Set("Accept", "application/json") + + authToken := p.AuthorizationToken + var err error + if p.AuthorizationTokenProvider != nil { + authToken, err = p.AuthorizationTokenProvider.GetToken() + if err != nil { + return nil, fmt.Errorf("get authorization token: %v", err) + } + } + + if strings.ContainsAny(authToken, "\r\n") { + return nil, fmt.Errorf("authorization token contains invalid newline sequence") + } + if len(authToken) != 0 { + req.HTTPRequest.Header.Set("Authorization", authToken) + } + + return out, req.Send() +} + +func validateEndpointHandler(r *request.Request) { + if len(r.ClientInfo.Endpoint) == 0 { + r.Error = aws.ErrMissingEndpoint + } +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + out := r.Data.(*getCredentialsOutput) + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { 
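+		// Decoding failed; record a serialization error on the request so
+		// the credential retrieval is reported as failed rather than
+		// silently returning zero-value credentials.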
+ r.Error = awserr.New(request.ErrCodeSerialization, + "failed to decode endpoint credentials", + err, + ) + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var errOut errorOutput + err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.New(errOut.Code, errOut.Message, nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 000000000..54c5cf733 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,74 @@ +package credentials + +import ( + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. + ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. 
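+// Environment credentials never expire once read, so this reports true only
+// before the first successful Retrieve call.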
+func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini new file mode 100644 index 000000000..7fc91d9d2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go new file mode 100644 index 000000000..18694f07f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -0,0 +1,438 @@ +/* +Package processcreds is a credential Provider to retrieve `credential_process` +credentials. + +WARNING: The following describes a method of sourcing credentials from an external +process. This can potentially be dangerous, so proceed with caution. Other +credential providers should be preferred if at all possible. If using this +option, you should make sure that the config file is as locked down as possible +using security best practices for your operating system. + +You can use credentials from a `credential_process` in a variety of ways. + +One way is to setup your shared config file, located in the default +location, with the `credential_process` key and the command you want to be +called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable +(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. + + [default] + credential_process = /command/to/call + +Creating a new session will use the credential process to retrieve credentials. +NOTE: If there are credentials in the profile you are using, the credential +process will not be used. + + // Initialize a session to load credentials. + sess, _ := session.NewSession(&aws.Config{ + Region: aws.String("us-east-1")}, + ) + + // Create S3 service client to use the credentials. + svc := s3.New(sess) + +Another way to use the `credential_process` method is by using +`credentials.NewCredentials()` and providing a command to be executed to +retrieve credentials: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentials("/path/to/command") + + // Create service client value configured for credentials. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +You can set a non-default timeout for the `credential_process` with another +constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To +set a one minute timeout: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentialsTimeout( + "/path/to/command", + time.Duration(500) * time.Millisecond) + +If you need more control, you can set any configurable options in the +credentials using one or more option functions. For example, you can set a two +minute timeout, a credential duration of 60 minutes, and a maximum stdout +buffer size of 2k. 
+ + creds := processcreds.NewCredentials( + "/path/to/command", + func(opt *ProcessProvider) { + opt.Timeout = time.Duration(2) * time.Minute + opt.Duration = time.Duration(60) * time.Minute + opt.MaxBufSize = 2048 + }) + +You can also use your own `exec.Cmd`: + + // Create an exec.Cmd + myCommand := exec.Command("/path/to/command") + + // Create credentials using your exec.Cmd and custom timeout + creds := processcreds.NewCredentialsCommand( + myCommand, + func(opt *processcreds.ProcessProvider) { + opt.Timeout = time.Duration(1) * time.Second + }) +*/ +package processcreds + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "runtime" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ProviderName is the name this credentials provider will label any + // returned credentials Value with. + ProviderName = `ProcessProvider` + + // ErrCodeProcessProviderParse error parsing process output + ErrCodeProcessProviderParse = "ProcessProviderParseError" + + // ErrCodeProcessProviderVersion version error in output + ErrCodeProcessProviderVersion = "ProcessProviderVersionError" + + // ErrCodeProcessProviderRequired required attribute missing in output + ErrCodeProcessProviderRequired = "ProcessProviderRequiredError" + + // ErrCodeProcessProviderExecution execution of command failed + ErrCodeProcessProviderExecution = "ProcessProviderExecutionError" + + // errMsgProcessProviderTimeout process took longer than allowed + errMsgProcessProviderTimeout = "credential process timed out" + + // errMsgProcessProviderProcess process error + errMsgProcessProviderProcess = "error in credential_process" + + // errMsgProcessProviderParse problem parsing output + errMsgProcessProviderParse = "parse failed of credential_process output" + + // errMsgProcessProviderVersion version error in output + errMsgProcessProviderVersion = "wrong version in process output (not 1)" + + // errMsgProcessProviderMissKey missing access key id in output + errMsgProcessProviderMissKey = "missing AccessKeyId in process output" + + // errMsgProcessProviderMissSecret missing secret acess key in output + errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output" + + // errMsgProcessProviderPrepareCmd prepare of command failed + errMsgProcessProviderPrepareCmd = "failed to prepare command" + + // errMsgProcessProviderEmptyCmd command must not be empty + errMsgProcessProviderEmptyCmd = "command must not be empty" + + // errMsgProcessProviderPipe failed to initialize pipe + errMsgProcessProviderPipe = "failed to initialize pipe" + + // DefaultDuration is the default amount of time in minutes that the + // credentials will be valid for. + DefaultDuration = time.Duration(15) * time.Minute + + // DefaultBufSize limits buffer size from growing to an enormous + // amount due to a faulty process. + DefaultBufSize = int(8 * sdkio.KibiByte) + + // DefaultTimeout default limit on time a process can run. + DefaultTimeout = time.Duration(1) * time.Minute +) + +// ProcessProvider satisfies the credentials.Provider interface, and is a +// client to retrieve credentials from a process. +type ProcessProvider struct { + staticCreds bool + credentials.Expiry + originalCommand []string + + // Expiry duration of the credentials. Defaults to 15 minutes if not set. 
+ Duration time.Duration + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // A string representing an os command that should return a JSON with + // credential information. + command *exec.Cmd + + // MaxBufSize limits memory usage from growing to an enormous + // amount due to a faulty process. + MaxBufSize int + + // Timeout limits the time a process can run. + Timeout time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// ProcessProvider. The credentials will expire every 15 minutes by default. +func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: exec.Command(command), + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsTimeout returns a pointer to a new Credentials object with +// the specified command and timeout, and default duration and max buffer size. +func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials { + p := NewCredentials(command, func(opt *ProcessProvider) { + opt.Timeout = timeout + }) + + return p +} + +// NewCredentialsCommand returns a pointer to a new Credentials object with +// the specified command, and default timeout, duration and max buffer size. +func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: command, + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// A CredentialProcessResponse is the AWS credentials format that must be +// returned when executing an external credential_process. +type CredentialProcessResponse struct { + // As of this writing, the Version key must be set to 1. This might + // increment over time as the structure evolves. + Version int + + // The access key ID that identifies the temporary security credentials. + AccessKeyID string `json:"AccessKeyId"` + + // The secret access key that can be used to sign requests. + SecretAccessKey string + + // The token that users must pass to the service API to use the temporary credentials. + SessionToken string + + // The date on which the current credentials expire. + Expiration *time.Time +} + +// Retrieve executes the 'credential_process' and returns the credentials. 
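+//
+// For illustration, the configured command is expected to print a JSON
+// document matching CredentialProcessResponse above; all values below are
+// placeholders:
+//
+//	{
+//		"Version": 1,
+//		"AccessKeyId": "accessKeyID",
+//		"SecretAccessKey": "secretAccessKey",
+//		"SessionToken": "sessionToken",
+//		"Expiration": "2024-10-04T00:00:00Z"
+//	}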
+func (p *ProcessProvider) Retrieve() (credentials.Value, error) { + out, err := p.executeCredentialProcess() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // Serialize and validate response + resp := &CredentialProcessResponse{} + if err = json.Unmarshal(out, resp); err != nil { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderParse, + fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), + err) + } + + if resp.Version != 1 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderVersion, + errMsgProcessProviderVersion, + nil) + } + + if len(resp.AccessKeyID) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissKey, + nil) + } + + if len(resp.SecretAccessKey) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissSecret, + nil) + } + + // Handle expiration + p.staticCreds = resp.Expiration == nil + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } + + return credentials.Value{ + ProviderName: ProviderName, + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.SessionToken, + }, nil +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *ProcessProvider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// prepareCommand prepares the command to be executed. +func (p *ProcessProvider) prepareCommand() error { + + var cmdArgs []string + if runtime.GOOS == "windows" { + cmdArgs = []string{"cmd.exe", "/C"} + } else { + cmdArgs = []string{"sh", "-c"} + } + + if len(p.originalCommand) == 0 { + p.originalCommand = make([]string, len(p.command.Args)) + copy(p.originalCommand, p.command.Args) + + // check for empty command because it succeeds + if len(strings.TrimSpace(p.originalCommand[0])) < 1 { + return awserr.New( + ErrCodeProcessProviderExecution, + fmt.Sprintf( + "%s: %s", + errMsgProcessProviderPrepareCmd, + errMsgProcessProviderEmptyCmd), + nil) + } + } + + cmdArgs = append(cmdArgs, p.originalCommand...) + p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) + p.command.Env = os.Environ() + + return nil +} + +// executeCredentialProcess starts the credential process on the OS and +// returns the results or an error. 
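+//
+// Stdout is read through an os.Pipe wrapped in an io.LimitReader so a
+// misbehaving process cannot grow the output buffer past MaxBufSize, while
+// stderr and stdin remain attached to the console for interactive MFA
+// prompts.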
+func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { + + if err := p.prepareCommand(); err != nil { + return nil, err + } + + // Setup the pipes + outReadPipe, outWritePipe, err := os.Pipe() + if err != nil { + return nil, awserr.New( + ErrCodeProcessProviderExecution, + errMsgProcessProviderPipe, + err) + } + + p.command.Stderr = os.Stderr // display stderr on console for MFA + p.command.Stdout = outWritePipe // get creds json on process's stdout + p.command.Stdin = os.Stdin // enable stdin for MFA + + output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) + + stdoutCh := make(chan error, 1) + go readInput( + io.LimitReader(outReadPipe, int64(p.MaxBufSize)), + output, + stdoutCh) + + execCh := make(chan error, 1) + go executeCommand(*p.command, execCh) + + finished := false + var errors []error + for !finished { + select { + case readError := <-stdoutCh: + errors = appendError(errors, readError) + finished = true + case execError := <-execCh: + err := outWritePipe.Close() + errors = appendError(errors, err) + errors = appendError(errors, execError) + if errors != nil { + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderProcess, + errors) + } + case <-time.After(p.Timeout): + finished = true + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderTimeout, + errors) // errors can be nil + } + } + + out := output.Bytes() + + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) + } + + return out, nil +} + +// appendError conveniently checks for nil before appending slice +func appendError(errors []error, err error) []error { + if err != nil { + return append(errors, err) + } + return errors +} + +func executeCommand(cmd exec.Cmd, exec chan error) { + // Start the command + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + + exec <- err +} + +func readInput(r io.Reader, w io.Writer, read chan error) { + tee := io.TeeReader(r, w) + + _, err := ioutil.ReadAll(tee) + + if err == io.EOF { + err = nil + } + + read <- err // will only arrive here when write end of pipe is closed +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 000000000..22b5c5d9f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,151 @@ +package credentials + +import ( + "fmt" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/ini" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves access key pair (access key ID, +// secret access key, and session token if present) credentials from the current +// user's home directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. 
If the
+	// env value is empty, it will default to the current user's home directory.
+	// Linux/OSX: "$HOME/.aws/credentials"
+	// Windows:   "%USERPROFILE%\.aws\credentials"
+	Filename string
+
+	// AWS Profile to extract credentials from the shared credentials file. If
+	// empty, will default to the environment variable "AWS_PROFILE", or
+	// "default" if that environment variable is also not set.
+	Profile string
+
+	// retrieved states if the credentials have been successfully retrieved.
+	retrieved bool
+}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+	return NewCredentials(&SharedCredentialsProvider{
+		Filename: filename,
+		Profile:  profile,
+	})
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+	p.retrieved = false
+
+	filename, err := p.filename()
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, err
+	}
+
+	creds, err := loadProfile(filename, p.profile())
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, err
+	}
+
+	p.retrieved = true
+	return creds, nil
+}
+
+// IsExpired returns whether the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+	return !p.retrieved
+}
+
+// loadProfile loads the given profile from the file pointed to by the shared
+// credentials filename. The credentials retrieved from the profile will be
+// returned, or an error if it fails to read from the file or the data is
+// invalid.
+func loadProfile(filename, profile string) (Value, error) {
+	config, err := ini.OpenFile(filename)
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+	}
+
+	iniProfile, ok := config.GetSection(profile)
+	if !ok {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
+	}
+
+	id := iniProfile.String("aws_access_key_id")
+	if len(id) == 0 {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
+			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
+			nil)
+	}
+
+	secret := iniProfile.String("aws_secret_access_key")
+	if len(secret) == 0 {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
+			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
+			nil)
+	}
+
+	// Default to empty string if not found
+	token := iniProfile.String("aws_session_token")
+
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
+		ProviderName:    SharedCredsProviderName,
+	}, nil
+}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+	if len(p.Filename) != 0 {
+		return p.Filename, nil
+	}
+
+	if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
+		return p.Filename, nil
+	}
+
+	if home := shareddefaults.UserHomeDir(); len(home) == 0 {
+		// Backwards compatibility of the home directory not found error being
+		// returned. This error is too verbose; failure when opening the file
+		// would have been a better error to return.
+ return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = shareddefaults.SharedCredentialsFilename() + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". +func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go new file mode 100644 index 000000000..18c940ab3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go @@ -0,0 +1,60 @@ +// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token. +// +// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider +// expects that you have already performed the SSO login flow using AWS CLI using the "aws sso login" command, or by +// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in +// ~/.aws/sso/cache. If a cached token is not found, it is expired, or the file is malformed an error will be returned. +// +// Loading AWS SSO credentials with the AWS shared configuration file +// +// You can use configure AWS SSO credentials from the AWS shared configuration file by +// providing the specifying the required keys in the profile: +// +// sso_account_id +// sso_region +// sso_role_name +// sso_start_url +// +// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that defines the target +// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be +// provided, or an error will be returned. +// +// [profile devsso] +// sso_start_url = https://my-sso-portal.awsapps.com/start +// sso_role_name = SSOReadOnlyRole +// sso_region = us-east-1 +// sso_account_id = 123456789012 +// +// Using the config module, you can load the AWS SDK shared configuration, and specify that this profile be used to +// retrieve credentials. For example: +// +// sess, err := session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// Profile: "devsso", +// }) +// if err != nil { +// return err +// } +// +// Programmatically loading AWS SSO credentials directly +// +// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information +// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache. 
+// +// svc := sso.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), // Client Region must correspond to the AWS SSO user portal region +// }) +// +// provider := ssocreds.NewCredentialsWithClient(svc, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start") +// +// credentials, err := provider.Get() +// if err != nil { +// return err +// } +// +// Additional Resources +// +// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +// +// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html +package ssocreds diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go new file mode 100644 index 000000000..d4df39a7a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go @@ -0,0 +1,10 @@ +//go:build !windows +// +build !windows + +package ssocreds + +import "os" + +func getHomeDirectory() string { + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go new file mode 100644 index 000000000..eb48f61e5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go @@ -0,0 +1,7 @@ +package ssocreds + +import "os" + +func getHomeDirectory() string { + return os.Getenv("USERPROFILE") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go new file mode 100644 index 000000000..4138e725d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go @@ -0,0 +1,187 @@ +package ssocreds + +import ( + "crypto/sha1" + "encoding/hex" + "encoding/json" + "io/ioutil" + "path/filepath" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/auth/bearer" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sso" + "github.com/aws/aws-sdk-go/service/sso/ssoiface" +) + +// ErrCodeSSOProviderInvalidToken is the code type that is returned if loaded token has expired or is otherwise invalid. +// To refresh the SSO session run aws sso login with the corresponding profile. +const ErrCodeSSOProviderInvalidToken = "SSOProviderInvalidToken" + +const invalidTokenMessage = "the SSO session has expired or is invalid" + +func init() { + nowTime = time.Now + defaultCacheLocation = defaultCacheLocationImpl +} + +var nowTime func() time.Time + +// ProviderName is the name of the provider used to specify the source of credentials. +const ProviderName = "SSOProvider" + +var defaultCacheLocation func() string + +func defaultCacheLocationImpl() string { + return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache") +} + +// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token. +type Provider struct { + credentials.Expiry + + // The Client which is configured for the AWS Region where the AWS SSO user portal is located. + Client ssoiface.SSOAPI + + // The AWS account that is assigned to the user. + AccountID string + + // The role name that is assigned to the user. + RoleName string + + // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal. 
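+	// For example: https://my-sso-portal.awsapps.com/start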
+ StartURL string + + // The filepath the cached token will be retrieved from. If unset Provider will + // use the startURL to determine the filepath at. + // + // ~/.aws/sso/cache/.json + // + // If custom cached token filepath is used, the Provider's startUrl + // parameter will be ignored. + CachedTokenFilepath string + + // Used by the SSOCredentialProvider if a token configuration + // profile is used in the shared config + TokenProvider bearer.TokenProvider +} + +// NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured +// for the AWS Region where the AWS SSO user portal is located. +func NewCredentials(configProvider client.ConfigProvider, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials { + return NewCredentialsWithClient(sso.New(configProvider), accountID, roleName, startURL, optFns...) +} + +// NewCredentialsWithClient returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured +// for the AWS Region where the AWS SSO user portal is located. +func NewCredentialsWithClient(client ssoiface.SSOAPI, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials { + p := &Provider{ + Client: client, + AccountID: accountID, + RoleName: roleName, + StartURL: startURL, + } + + for _, fn := range optFns { + fn(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal +// by exchanging the accessToken present in ~/.aws/sso/cache. +func (p *Provider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal +// by exchanging the accessToken present in ~/.aws/sso/cache. 
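+//
+// A minimal usage sketch; sess is assumed to be a session.Session configured
+// for the user portal's region, and the account, role, and start URL values
+// are placeholders:
+//
+//	creds := ssocreds.NewCredentials(sess, "123456789012", "SSOReadOnlyRole",
+//		"https://my-sso-portal.awsapps.com/start")
+//	v, err := creds.Get()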
+func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + var accessToken *string + if p.TokenProvider != nil { + token, err := p.TokenProvider.RetrieveBearerToken(ctx) + if err != nil { + return credentials.Value{}, err + } + accessToken = &token.Value + } else { + if p.CachedTokenFilepath == "" { + cachedTokenFilePath, err := getCachedFilePath(p.StartURL) + if err != nil { + return credentials.Value{}, err + } + p.CachedTokenFilepath = cachedTokenFilePath + } + + tokenFile, err := loadTokenFile(p.CachedTokenFilepath) + if err != nil { + return credentials.Value{}, err + } + accessToken = &tokenFile.AccessToken + } + + output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{ + AccessToken: accessToken, + AccountId: &p.AccountID, + RoleName: &p.RoleName, + }) + if err != nil { + return credentials.Value{}, err + } + + expireTime := time.Unix(0, aws.Int64Value(output.RoleCredentials.Expiration)*int64(time.Millisecond)).UTC() + p.SetExpiration(expireTime, 0) + + return credentials.Value{ + AccessKeyID: aws.StringValue(output.RoleCredentials.AccessKeyId), + SecretAccessKey: aws.StringValue(output.RoleCredentials.SecretAccessKey), + SessionToken: aws.StringValue(output.RoleCredentials.SessionToken), + ProviderName: ProviderName, + }, nil +} + +func getCachedFilePath(startUrl string) (string, error) { + hash := sha1.New() + _, err := hash.Write([]byte(startUrl)) + if err != nil { + return "", err + } + return filepath.Join(defaultCacheLocation(), strings.ToLower(hex.EncodeToString(hash.Sum(nil)))+".json"), nil +} + +type token struct { + AccessToken string `json:"accessToken"` + ExpiresAt rfc3339 `json:"expiresAt"` + Region string `json:"region,omitempty"` + StartURL string `json:"startUrl,omitempty"` +} + +func (t token) Expired() bool { + return nowTime().Round(0).After(time.Time(t.ExpiresAt)) +} + +func loadTokenFile(cachedTokenPath string) (t token, err error) { + fileBytes, err := ioutil.ReadFile(cachedTokenPath) + if err != nil { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) + } + + if err := json.Unmarshal(fileBytes, &t); err != nil { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) + } + + if len(t.AccessToken) == 0 { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil) + } + + if t.Expired() { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil) + } + + return t, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go new file mode 100644 index 000000000..f6fa88451 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go @@ -0,0 +1,237 @@ +package ssocreds + +import ( + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go/internal/shareddefaults" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" +) + +var resolvedOsUserHomeDir = shareddefaults.UserHomeDir + +// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or +// error if unable get derive the path. Key that will be used to compute a SHA1 +// value that is hex encoded. 
+// +// Derives the filepath using the Key as: +// +// ~/.aws/sso/cache/.json +func StandardCachedTokenFilepath(key string) (string, error) { + homeDir := resolvedOsUserHomeDir() + if len(homeDir) == 0 { + return "", fmt.Errorf("unable to get USER's home directory for cached token") + } + hash := sha1.New() + if _, err := hash.Write([]byte(key)); err != nil { + return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %v", err) + } + + cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json" + + return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil +} + +type tokenKnownFields struct { + AccessToken string `json:"accessToken,omitempty"` + ExpiresAt *rfc3339 `json:"expiresAt,omitempty"` + + RefreshToken string `json:"refreshToken,omitempty"` + ClientID string `json:"clientId,omitempty"` + ClientSecret string `json:"clientSecret,omitempty"` +} + +type cachedToken struct { + tokenKnownFields + UnknownFields map[string]interface{} `json:"-"` +} + +// MarshalJSON provides custom marshalling because the standard library Go marshaller ignores unknown/unspecified fields +// when marshalling from a struct: https://pkg.go.dev/encoding/json#Marshal +// This function adds some extra validation to the known fields and captures unknown fields. +func (t cachedToken) MarshalJSON() ([]byte, error) { + fields := map[string]interface{}{} + + setTokenFieldString(fields, "accessToken", t.AccessToken) + setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt) + + setTokenFieldString(fields, "refreshToken", t.RefreshToken) + setTokenFieldString(fields, "clientId", t.ClientID) + setTokenFieldString(fields, "clientSecret", t.ClientSecret) + + for k, v := range t.UnknownFields { + if _, ok := fields[k]; ok { + return nil, fmt.Errorf("unknown token field %v, duplicates known field", k) + } + fields[k] = v + } + + return json.Marshal(fields) +} + +func setTokenFieldString(fields map[string]interface{}, key, value string) { + if value == "" { + return + } + fields[key] = value +} +func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) { + if value == nil { + return + } + fields[key] = value +} + +// UnmarshalJSON provides custom unmarshalling because the standard library Go unmarshaller ignores unknown/unspecified +// fields when unmarshalling from a struct: https://pkg.go.dev/encoding/json#Unmarshal +// This function adds some extra validation to the known fields and captures unknown fields. 
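+//
+// Together with MarshalJSON above, this allows a cached token file to be
+// read, refreshed, and written back without dropping fields this SDK
+// version does not recognize.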
+func (t *cachedToken) UnmarshalJSON(b []byte) error { + var fields map[string]interface{} + if err := json.Unmarshal(b, &fields); err != nil { + return nil + } + + t.UnknownFields = map[string]interface{}{} + + for k, v := range fields { + var err error + switch k { + case "accessToken": + err = getTokenFieldString(v, &t.AccessToken) + case "expiresAt": + err = getTokenFieldRFC3339(v, &t.ExpiresAt) + case "refreshToken": + err = getTokenFieldString(v, &t.RefreshToken) + case "clientId": + err = getTokenFieldString(v, &t.ClientID) + case "clientSecret": + err = getTokenFieldString(v, &t.ClientSecret) + default: + t.UnknownFields[k] = v + } + + if err != nil { + return fmt.Errorf("field %q, %v", k, err) + } + } + + return nil +} + +func getTokenFieldString(v interface{}, value *string) error { + var ok bool + *value, ok = v.(string) + if !ok { + return fmt.Errorf("expect value to be string, got %T", v) + } + return nil +} + +func getTokenFieldRFC3339(v interface{}, value **rfc3339) error { + var stringValue string + if err := getTokenFieldString(v, &stringValue); err != nil { + return err + } + + timeValue, err := parseRFC3339(stringValue) + if err != nil { + return err + } + + *value = &timeValue + return nil +} + +func loadCachedToken(filename string) (cachedToken, error) { + fileBytes, err := ioutil.ReadFile(filename) + if err != nil { + return cachedToken{}, fmt.Errorf("failed to read cached SSO token file, %v", err) + } + + var t cachedToken + if err := json.Unmarshal(fileBytes, &t); err != nil { + return cachedToken{}, fmt.Errorf("failed to parse cached SSO token file, %v", err) + } + + if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() { + return cachedToken{}, fmt.Errorf( + "cached SSO token must contain accessToken and expiresAt fields") + } + + return t, nil +} + +func storeCachedToken(filename string, t cachedToken, fileMode os.FileMode) (err error) { + tmpFilename := filename + ".tmp-" + strconv.FormatInt(nowTime().UnixNano(), 10) + if err := writeCacheFile(tmpFilename, fileMode, t); err != nil { + return err + } + + if err := os.Rename(tmpFilename, filename); err != nil { + return fmt.Errorf("failed to replace old cached SSO token file, %v", err) + } + + return nil +} + +func writeCacheFile(filename string, fileMode os.FileMode, t cachedToken) (err error) { + var f *os.File + f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode) + if err != nil { + return fmt.Errorf("failed to create cached SSO token file %v", err) + } + + defer func() { + closeErr := f.Close() + if err == nil && closeErr != nil { + err = fmt.Errorf("failed to close cached SSO token file, %v", closeErr) + } + }() + + encoder := json.NewEncoder(f) + + if err = encoder.Encode(t); err != nil { + return fmt.Errorf("failed to serialize cached SSO token, %v", err) + } + + return nil +} + +type rfc3339 time.Time + +// UnmarshalJSON decode rfc3339 from JSON format +func (r *rfc3339) UnmarshalJSON(bytes []byte) error { + var value string + var err error + + if err = json.Unmarshal(bytes, &value); err != nil { + return err + } + + *r, err = parseRFC3339(value) + return err +} + +func parseRFC3339(v string) (rfc3339, error) { + parsed, err := time.Parse(time.RFC3339, v) + if err != nil { + return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %v", err) + } + + return rfc3339(parsed), nil +} + +// MarshalJSON encode rfc3339 to JSON format time +func (r *rfc3339) MarshalJSON() ([]byte, error) { + value := time.Time(*r).Format(time.RFC3339) + + // Use JSON unmarshal 
+	return json.Marshal(value)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go
new file mode 100644
index 000000000..3388b78b4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go
@@ -0,0 +1,148 @@
+package ssocreds
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/auth/bearer"
+	"github.com/aws/aws-sdk-go/service/ssooidc"
+)
+
+// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API
+// client for calling the CreateToken operation to refresh the SSO token.
+type CreateTokenAPIClient interface {
+	CreateToken(input *ssooidc.CreateTokenInput) (*ssooidc.CreateTokenOutput, error)
+}
+
+// SSOTokenProviderOptions provides the options for configuring the
+// SSOTokenProvider.
+type SSOTokenProviderOptions struct {
+	// Client that can be overridden.
+	Client CreateTokenAPIClient
+
+	// The path the file containing the cached SSO token will be read from.
+	// Initialized by the NewSSOTokenProvider's cachedTokenFilepath parameter.
+	CachedTokenFilepath string
+}
+
+// SSOTokenProvider provides a utility for refreshing SSO AccessTokens for
+// Bearer Authentication. The SSOTokenProvider can only be used to refresh
+// already cached SSO Tokens. This utility cannot perform the initial SSO
+// create token.
+//
+// The initial SSO create token should be performed with the AWS CLI before the
+// Go application using the SSOTokenProvider will need to retrieve the SSO
+// token. If the AWS CLI has not created the token cache file, this provider
+// will return an error when attempting to retrieve the cached token.
+//
+// This provider will attempt to refresh the cached SSO token periodically if
+// needed when RetrieveBearerToken is called.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file.
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+type SSOTokenProvider struct {
+	options SSOTokenProviderOptions
+}
+
+// NewSSOTokenProvider returns an initialized SSOTokenProvider that will
+// periodically refresh the cached SSO token stored in the cachedTokenFilepath.
+// The cachedTokenFilepath file's content will be rewritten by the token
+// provider when the token is refreshed.
+//
+// The client must be configured for the AWS region the SSO token was created for.
+func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider {
+	options := SSOTokenProviderOptions{
+		Client:              client,
+		CachedTokenFilepath: cachedTokenFilepath,
+	}
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	provider := &SSOTokenProvider{
+		options: options,
+	}
+
+	return provider
+}
+
+// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath
+// the SSOTokenProvider was created with. If the token has expired
+// RetrieveBearerToken will attempt to refresh it. If the token cannot be
+// refreshed or is not present an error will be returned.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file.
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
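+//
+// A minimal usage sketch (the ssooidc client construction and the cache file
+// path variable are illustrative, not prescriptive):
+//
+//     client := ssooidc.New(sess) // sess is an existing *session.Session
+//     provider := ssocreds.NewSSOTokenProvider(client, cachedTokenPath)
+//
+//     token, err := provider.RetrieveBearerToken(aws.BackgroundContext())
+//     if err != nil {
+//         // handle error
+//     }
+//     _ = token.Value // the SSO access token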
+func (p *SSOTokenProvider) RetrieveBearerToken(ctx aws.Context) (bearer.Token, error) {
+	cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath)
+	if err != nil {
+		return bearer.Token{}, err
+	}
+
+	if cachedToken.ExpiresAt != nil && nowTime().After(time.Time(*cachedToken.ExpiresAt)) {
+		cachedToken, err = p.refreshToken(cachedToken)
+		if err != nil {
+			return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %v", err)
+		}
+	}
+
+	expiresAt := toTime((*time.Time)(cachedToken.ExpiresAt))
+	return bearer.Token{
+		Value:     cachedToken.AccessToken,
+		CanExpire: !expiresAt.IsZero(),
+		Expires:   expiresAt,
+	}, nil
+}
+
+func (p *SSOTokenProvider) refreshToken(token cachedToken) (cachedToken, error) {
+	if token.ClientSecret == "" || token.ClientID == "" || token.RefreshToken == "" {
+		return cachedToken{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed")
+	}
+
+	createResult, err := p.options.Client.CreateToken(&ssooidc.CreateTokenInput{
+		ClientId:     &token.ClientID,
+		ClientSecret: &token.ClientSecret,
+		RefreshToken: &token.RefreshToken,
+		GrantType:    aws.String("refresh_token"),
+	})
+	if err != nil {
+		return cachedToken{}, fmt.Errorf("unable to refresh SSO token, %v", err)
+	}
+	if createResult.ExpiresIn == nil {
+		return cachedToken{}, fmt.Errorf("missing required field ExpiresIn")
+	}
+	if createResult.AccessToken == nil {
+		return cachedToken{}, fmt.Errorf("missing required field AccessToken")
+	}
+	if createResult.RefreshToken == nil {
+		return cachedToken{}, fmt.Errorf("missing required field RefreshToken")
+	}
+
+	expiresAt := nowTime().Add(time.Duration(*createResult.ExpiresIn) * time.Second)
+
+	token.AccessToken = *createResult.AccessToken
+	token.ExpiresAt = (*rfc3339)(&expiresAt)
+	token.RefreshToken = *createResult.RefreshToken
+
+	fileInfo, err := os.Stat(p.options.CachedTokenFilepath)
+	if err != nil {
+		return cachedToken{}, fmt.Errorf("failed to stat cached SSO token file, %v", err)
+	}
+
+	if err = storeCachedToken(p.options.CachedTokenFilepath, token, fileInfo.Mode()); err != nil {
+		return cachedToken{}, fmt.Errorf("unable to cache refreshed SSO token, %v", err)
+	}
+
+	return token, nil
+}
+
+func toTime(p *time.Time) (v time.Time) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 000000000..cbba1e3d5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,57 @@
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// StaticProviderName provides a name of Static provider
+const StaticProviderName = "StaticProvider"
+
+var (
+	// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+	ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+	Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider. Token is only required
+// for temporary security credentials retrieved via STS, otherwise an empty
+// string can be passed for this parameter.
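+//
+// A brief, illustrative sketch (the key and secret values are placeholders):
+//
+//     creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
+//     svc := s3.New(session.Must(session.NewSession()),
+//         &aws.Config{Credentials: creds})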
+func NewStaticCredentials(id, secret, token string) *Credentials {
+	return NewCredentials(&StaticProvider{Value: Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
+	}})
+}
+
+// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
+// wrapping the static credentials value provided. Same as NewStaticCredentials
+// but takes the creds Value instead of individual fields.
+func NewStaticCredentialsFromCreds(creds Value) *Credentials {
+	return NewCredentials(&StaticProvider{Value: creds})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+		return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+	}
+
+	if len(s.Value.ProviderName) == 0 {
+		s.Value.ProviderName = StaticProviderName
+	}
+	return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+	return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 000000000..86db488de
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,371 @@
+/*
+Package stscreds are credential Providers to retrieve STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that per instance of credentials.Credentials all requests
+to refresh the credentials will be synchronized. But, the SDK is unable to
+ensure synchronous usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials, Sessions or service clients.
+
+# Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDK's stscreds package.
+
+	// Initial credentials loaded from SDK's default credential chain. Such as
+	// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+	// Role. These credentials will be used to make the STS Assume Role API call.
+	sess := session.Must(session.NewSession())
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN.
+	creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+# Assume Role with static MFA Token
+
+To assume an IAM role with a MFA token you can either specify a MFA token code
+directly or provide a function to prompt the user each time the credentials
+need to refresh the role's credentials. Specifying the TokenCode should be used
+for short lived operations that will not need to be refreshed, and when you do
+not want direct control over how the user provides their MFA token.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN using the MFA token code provided.
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenCode = aws.String("00000000")
+	})
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+# Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+will allow the credential provider to prompt for a new MFA token code when the
+role's credentials need to be refreshed.
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenProvider = stscreds.StdinTokenProvider
+	})
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+*/
+package stscreds
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkrand"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// Will wait forever until something is provided on stdin.
+func StdinTokenProvider() (string, error) {
+	var v string
+	fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
+	_, err := fmt.Scanln(&v)
+
+	return v, err
+}
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+	AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+type assumeRolerWithContext interface {
+	AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time that the credentials
+// will be valid for, 15 minutes.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+	credentials.Expiry
+
+	// STS client to make assume role request with.
+	Client AssumeRoler
+
+	// Role to be assumed.
+	RoleARN string
+
+	// Session name, if you wish to reuse the credentials elsewhere.
+	RoleSessionName string
+
+	// Optional, you can pass tag key-value pairs to your session. These tags are called session tags.
+	Tags []*sts.Tag
+
+	// A list of keys for session tags that you want to set as transitive.
+	// If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain.
+	TransitiveTagKeys []*string
+
+	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// Optional ExternalID to pass along, defaults to nil if not set.
+	ExternalID *string
+
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string
+
+	// The ARNs of IAM managed policies you want to use as managed session policies.
+	// The policies must exist in the same account as the role.
+	//
+	// This parameter is optional. You can provide up to 10 managed policy ARNs.
+	// However, the plain text that you use for both inline and managed session
+	// policies can't exceed 2,048 characters.
+	//
+	// An AWS conversion compresses the passed session policies and session tags
+	// into a packed binary format that has a separate limit. Your request can fail
+	// for this limit even if your plain text meets the other requirements. The
+	// PackedPolicySize response element indicates by percentage how close the policies
+	// and tags for your request are to the upper size limit.
+	//
+	// Passing policies to this operation returns new temporary credentials. The
+	// resulting session's permissions are the intersection of the role's identity-based
+	// policy and the session policies. You can use the role's temporary credentials
+	// in subsequent AWS API calls to access resources in the account that owns
+	// the role. You cannot use session policies to grant more permissions than
+	// those allowed by the identity-based policy of the role that is being assumed.
+	// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// in the IAM User Guide.
+	PolicyArns []*sts.PolicyDescriptorType
+
+	// The identification number of the MFA device that is associated with the user
+	// who is making the AssumeRole call. Specify this value if the trust policy
+	// of the role being assumed includes a condition that requires MFA authentication.
+	// The value is either the serial number for a hardware device (such as GAHT12345678)
+	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+	SerialNumber *string
+
+	// The SourceIdentity which is used to identify a persistent identity through the whole session.
+	// For more details see https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
+	SourceIdentity *string
+
+	// The value provided by the MFA device, if the trust policy of the role being
+	// assumed requires MFA (that is, if the policy includes a condition that tests
+	// for MFA). If the role being assumed requires MFA and if the TokenCode value
+	// is missing or expired, the AssumeRole call returns an "access denied" error.
+	//
+	// If SerialNumber is set and neither TokenCode nor TokenProvider are also
+	// set an error will be returned.
+	TokenCode *string
+
+	// Async method of providing MFA token code for assuming an IAM role with MFA.
+	// The value returned by the function will be used as the TokenCode in the Retrieve
+	// call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed when SerialNumber is also set and
+	// TokenCode is not set.
+	//
+	// If both TokenCode and TokenProvider are set, TokenProvider will be used and
+	// TokenCode is ignored.
+	TokenProvider func() (string, error)
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// MaxJitterFrac reduces the effective Duration of each credential requested
+	// by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must
+	// have a value between 0 and 1. Any other value may lead to unexpected behavior.
+	// With the default MaxJitterFrac value of 0, no jitter will be used.
+	//
+	// For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
+	// AssumeRole call will be made with an arbitrary Duration between 27m and
+	// 30m.
+	//
+	// MaxJitterFrac should not be negative.
+	MaxJitterFrac float64
+}
+
+// NewCredentials returns a pointer to a new Credentials value wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation. The
+// Credentials value will attempt to refresh the credentials using the provider
+// when Credentials.Get is called, if the cached credentials are expiring.
+//
+// Takes a Config provider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+	p := &AssumeRoleProvider{
+		Client:   sts.New(c),
+		RoleARN:  roleARN,
+		Duration: DefaultDuration,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials value wrapping the
+// AssumeRoleProvider.
The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. The +// Credentials value will attempt to refresh the credentials using the provider +// when Credentials.Get is called, if the cached credentials are expiring. +// +// Takes an AssumeRoler which can be satisfied by the STS client. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. + p.Duration = DefaultDuration + } + jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + Tags: p.Tags, + PolicyArns: p.PolicyArns, + TransitiveTagKeys: p.TransitiveTagKeys, + SourceIdentity: p.SourceIdentity, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil { + if p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } else if p.TokenProvider != nil { + input.SerialNumber = p.SerialNumber + code, err := p.TokenProvider() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + input.TokenCode = aws.String(code) + } else { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("AssumeRoleTokenNotAvailable", + "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) + } + } + + var roleOutput *sts.AssumeRoleOutput + var err error + + if c, ok := p.Client.(assumeRolerWithContext); ok { + roleOutput, err = c.AssumeRoleWithContext(ctx, input) + } else { + roleOutput, err = p.Client.AssumeRole(input) + } + + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. 
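+	// SetExpiration stores the STS-provided expiry on the embedded
+	// credentials.Expiry; a non-zero ExpiryWindow makes IsExpired() report
+	// true that much earlier, so a refresh happens before the actual expiry.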
+	p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
+
+	return credentials.Value{
+		AccessKeyID:     *roleOutput.Credentials.AccessKeyId,
+		SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
+		SessionToken:    *roleOutput.Credentials.SessionToken,
+		ProviderName:    ProviderName,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
new file mode 100644
index 000000000..19ad619aa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
@@ -0,0 +1,182 @@
+package stscreds
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/service/sts"
+	"github.com/aws/aws-sdk-go/service/sts/stsiface"
+)
+
+const (
+	// ErrCodeWebIdentity will be used as an error code when constructing
+	// a new error to be returned during session creation or retrieval.
+	ErrCodeWebIdentity = "WebIdentityErr"
+
+	// WebIdentityProviderName is the web identity provider name
+	WebIdentityProviderName = "WebIdentityCredentials"
+)
+
+// now is used to return a time.Time object representing
+// the current time. This makes it easy to stub out and
+// compare time values in tests.
+var now = time.Now
+
+// TokenFetcher should return WebIdentity token bytes or an error
+type TokenFetcher interface {
+	FetchToken(credentials.Context) ([]byte, error)
+}
+
+// FetchTokenPath is a path to a WebIdentity token file
+type FetchTokenPath string
+
+// FetchToken returns a token by reading from the filesystem
+func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) {
+	data, err := ioutil.ReadFile(string(f))
+	if err != nil {
+		errMsg := fmt.Sprintf("unable to read file at %s", f)
+		return nil, awserr.New(ErrCodeWebIdentity, errMsg, err)
+	}
+	return data, nil
+}
+
+// WebIdentityRoleProvider is used to retrieve credentials using
+// an OIDC token.
+type WebIdentityRoleProvider struct {
+	credentials.Expiry
+
+	// The policy ARNs to use with the web identity assumed role.
+	PolicyArns []*sts.PolicyDescriptorType
+
+	// Duration the STS credentials will be valid for. Truncated to seconds.
+	// If unset, the assumed role will use AssumeRoleWithWebIdentity's default
+	// expiry duration. See
+	// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#STS.AssumeRoleWithWebIdentity
+	// for more information.
+	Duration time.Duration
+
+	// The amount of time the credentials will be refreshed before they expire.
+	// This is useful to refresh credentials before they expire, reducing the
+	// risk of using expired credentials. If unset, will default to no expiry
+	// window.
+	ExpiryWindow time.Duration
+
+	client stsiface.STSAPI
+
+	tokenFetcher    TokenFetcher
+	roleARN         string
+	roleSessionName string
+}
+
+// NewWebIdentityCredentials will return a new set of credentials with a given
+// configuration, role arn, and token file path.
+//
+// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
+// functional options, and wrap with credentials.NewCredentials helper.
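+//
+// A sketch of the recommended replacement pattern (the role ARN, session
+// name, and token path are placeholders):
+//
+//     svc := sts.New(sess) // sess is an existing *session.Session
+//     p := stscreds.NewWebIdentityRoleProviderWithOptions(
+//         svc, "roleARN", "sessionName", stscreds.FetchTokenPath("/path/to/token"))
+//     creds := credentials.NewCredentials(p)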
+func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials {
+	svc := sts.New(c)
+	p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path)
+	return credentials.NewCredentials(p)
+}
+
+// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
+// provided stsiface.STSAPI
+//
+// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
+// functional options.
+func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
+	return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, FetchTokenPath(path))
+}
+
+// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the
+// provided stsiface.STSAPI and a TokenFetcher
+//
+// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
+// functional options.
+func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider {
+	return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, tokenFetcher)
+}
+
+// NewWebIdentityRoleProviderWithOptions will return an initialized
+// WebIdentityRoleProvider with the provided stsiface.STSAPI, role ARN, and a
+// TokenFetcher. Additional options can be provided as functional options.
+//
+// TokenFetcher is the implementation that will retrieve the JWT token used to
+// assume the role. Use the provided FetchTokenPath implementation to
+// retrieve the JWT token using a file system path.
+func NewWebIdentityRoleProviderWithOptions(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher, optFns ...func(*WebIdentityRoleProvider)) *WebIdentityRoleProvider {
+	p := WebIdentityRoleProvider{
+		client:          svc,
+		tokenFetcher:    tokenFetcher,
+		roleARN:         roleARN,
+		roleSessionName: roleSessionName,
+	}
+
+	for _, fn := range optFns {
+		fn(&p)
+	}
+
+	return &p
+}
+
+// Retrieve will attempt to assume a role using a token located at the
+// destination specified by 'WebIdentityTokenFilePath'; if that is empty an
+// error will be returned.
+func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
+	return p.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext will attempt to assume a role using a token located at
+// the destination specified by 'WebIdentityTokenFilePath'; if that is empty
+// an error will be returned.
+func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
+	b, err := p.tokenFetcher.FetchToken(ctx)
+	if err != nil {
+		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err)
+	}
+
+	sessionName := p.roleSessionName
+	if len(sessionName) == 0 {
+		// Session name is used to uniquely identify a session. This simply
+		// uses unix time in nanoseconds to uniquely identify sessions.
+		sessionName = strconv.FormatInt(now().UnixNano(), 10)
+	}
+
+	var duration *int64
+	if p.Duration != 0 {
+		duration = aws.Int64(int64(p.Duration / time.Second))
+	}
+
+	req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
+		PolicyArns:       p.PolicyArns,
+		RoleArn:          &p.roleARN,
+		RoleSessionName:  &sessionName,
+		WebIdentityToken: aws.String(string(b)),
+		DurationSeconds:  duration,
+	})
+
+	req.SetContext(ctx)
+
+	// InvalidIdentityToken error is a temporary error that can occur
+	// when assuming a Role with a JWT web identity token.
+	req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
+	if err := req.Send(); err != nil {
+		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
+	}
+
+	p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow)
+
+	value := credentials.Value{
+		AccessKeyID:     aws.StringValue(resp.Credentials.AccessKeyId),
+		SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),
+		SessionToken:    aws.StringValue(resp.Credentials.SessionToken),
+		ProviderName:    WebIdentityProviderName,
+	}
+	return value, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
new file mode 100644
index 000000000..25a66d1dd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
@@ -0,0 +1,69 @@
+// Package csm provides the Client Side Monitoring (CSM) client which enables
+// sending metrics via UDP connection to the CSM agent. This package provides
+// control options, and configuration for the CSM client. The client can be
+// controlled manually, or automatically via the SDK's Session configuration.
+//
+// Enabling CSM client via SDK's Session configuration
+//
+// The CSM client can be enabled automatically via SDK's Session configuration.
+// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT
+// environment variable is set to a non-empty value.
+//
+// The configuration options for the CSM client via the SDK's session
+// configuration are:
+//
+//     * AWS_CSM_PORT=<port number>
+//       The port number the CSM agent will receive metrics on.
+//
+//     * AWS_CSM_HOST=<hostname or ip>
+//       The hostname, or IP address the CSM agent will receive metrics on.
+//       Without port number.
+//
+// Manually enabling the CSM client
+//
+// The CSM client can be started, paused, and resumed manually. The Start
+// function will enable the CSM client to publish metrics to the CSM agent. It
+// is safe to call Start concurrently, but if Start is called additional times
+// with a different ClientID or address it will panic.
+//
+//     r, err := csm.Start("clientID", ":31000")
+//     if err != nil {
+//         panic(fmt.Errorf("failed starting CSM: %v", err))
+//     }
+//
+// When controlling the CSM client manually, you must also inject its request
+// handlers into the SDK's Session configuration for the SDK's API clients to
+// publish metrics.
+//
+//     sess, err := session.NewSession(&aws.Config{})
+//     if err != nil {
+//         panic(fmt.Errorf("failed loading session: %v", err))
+//     }
+//
+//     // Add CSM client's metric publishing request handlers to the SDK's
+//     // Session Configuration.
+//     r.InjectHandlers(&sess.Handlers)
+//
+// Controlling CSM client
+//
+// Once the CSM client has been enabled the Get function will return a Reporter
+// value that you can use to pause and resume the metrics published to the CSM
+// agent.
+// If the Get function is called before the reporter is enabled with the
+// Start function or via the SDK's Session configuration, nil will be returned.
+//
+// The Pause method can be called to stop the CSM client publishing metrics to
+// the CSM agent. The Continue method will resume metric publishing.
+//
+//     // Get the CSM client Reporter.
+//     r := csm.Get()
+//
+//     // Will pause monitoring
+//     r.Pause()
+//     resp, err = client.GetObject(&s3.GetObjectInput{
+//         Bucket: aws.String("bucket"),
+//         Key:    aws.String("key"),
+//     })
+//
+//     // Resume monitoring
+//     r.Continue()
+package csm
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
new file mode 100644
index 000000000..4b19e2800
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -0,0 +1,89 @@
+package csm
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+)
+
+var (
+	lock sync.Mutex
+)
+
+const (
+	// DefaultPort is used when no port is specified.
+	DefaultPort = "31000"
+
+	// DefaultHost is the host that will be used when none is specified.
+	DefaultHost = "127.0.0.1"
+)
+
+// AddressWithDefaults returns a CSM address built from the host and port
+// values. If the host or port is not set, default values will be used
+// instead. If host is "localhost" it will be replaced with "127.0.0.1".
+func AddressWithDefaults(host, port string) string {
+	if len(host) == 0 || strings.EqualFold(host, "localhost") {
+		host = DefaultHost
+	}
+
+	if len(port) == 0 {
+		port = DefaultPort
+	}
+
+	// Only an IPv6 host can contain a colon.
+	if strings.Contains(host, ":") {
+		return "[" + host + "]:" + port
+	}
+
+	return host + ":" + port
+}
+
+// Start will start a long-running goroutine to capture
+// client side metrics. Calling Start multiple times will only
+// start the metric listener once, and will panic if a different
+// client ID or port is passed in.
+//
+//     r, err := csm.Start("clientID", "127.0.0.1:31000")
+//     if err != nil {
+//         panic(fmt.Errorf("expected no error, but received %v", err))
+//     }
+//     sess := session.Must(session.NewSession())
+//     r.InjectHandlers(&sess.Handlers)
+//
+//     svc := s3.New(sess)
+//     out, err := svc.GetObject(&s3.GetObjectInput{
+//         Bucket: aws.String("bucket"),
+//         Key:    aws.String("key"),
+//     })
+func Start(clientID string, url string) (*Reporter, error) {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if sender == nil {
+		sender = newReporter(clientID, url)
+	} else {
+		if sender.clientID != clientID {
+			panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
+		}
+
+		if sender.url != url {
+			panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
+		}
+	}
+
+	if err := connect(url); err != nil {
+		sender = nil
+		return nil, err
+	}
+
+	return sender, nil
+}
+
+// Get will return a reporter if one exists; if one does not exist, nil will
+// be returned.
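+//
+// For example (a sketch; assumes Start was called earlier):
+//
+//     if r := csm.Get(); r != nil {
+//         r.Pause()
+//     }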
+func Get() *Reporter { + lock.Lock() + defer lock.Unlock() + + return sender +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go new file mode 100644 index 000000000..5bacc791a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -0,0 +1,109 @@ +package csm + +import ( + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" +) + +type metricTime time.Time + +func (t metricTime) MarshalJSON() ([]byte, error) { + ns := time.Duration(time.Time(t).UnixNano()) + return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil +} + +type metric struct { + ClientID *string `json:"ClientId,omitempty"` + API *string `json:"Api,omitempty"` + Service *string `json:"Service,omitempty"` + Timestamp *metricTime `json:"Timestamp,omitempty"` + Type *string `json:"Type,omitempty"` + Version *int `json:"Version,omitempty"` + + AttemptCount *int `json:"AttemptCount,omitempty"` + Latency *int `json:"Latency,omitempty"` + + Fqdn *string `json:"Fqdn,omitempty"` + UserAgent *string `json:"UserAgent,omitempty"` + AttemptLatency *int `json:"AttemptLatency,omitempty"` + + SessionToken *string `json:"SessionToken,omitempty"` + Region *string `json:"Region,omitempty"` + AccessKey *string `json:"AccessKey,omitempty"` + HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` + XAmzID2 *string `json:"XAmzId2,omitempty"` + XAmzRequestID *string `json:"XAmznRequestId,omitempty"` + + AWSException *string `json:"AwsException,omitempty"` + AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` + SDKException *string `json:"SdkException,omitempty"` + SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + + FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` + FinalAWSException *string `json:"FinalAwsException,omitempty"` + FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` + FinalSDKException *string `json:"FinalSdkException,omitempty"` + FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` + + DestinationIP *string `json:"DestinationIp,omitempty"` + ConnectionReused *int `json:"ConnectionReused,omitempty"` + + AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` + ConnectLatency *int `json:"ConnectLatency,omitempty"` + RequestLatency *int `json:"RequestLatency,omitempty"` + DNSLatency *int `json:"DnsLatency,omitempty"` + TCPLatency *int `json:"TcpLatency,omitempty"` + SSLLatency *int `json:"SslLatency,omitempty"` + + MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` +} + +func (m *metric) TruncateFields() { + m.ClientID = truncateString(m.ClientID, 255) + m.UserAgent = truncateString(m.UserAgent, 256) + + m.AWSException = truncateString(m.AWSException, 128) + m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) + + m.SDKException = truncateString(m.SDKException, 128) + m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) + + m.FinalAWSException = truncateString(m.FinalAWSException, 128) + m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) + + m.FinalSDKException = truncateString(m.FinalSDKException, 128) + m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) +} + +func truncateString(v *string, l int) *string { + if v != nil && len(*v) > l { + nv := (*v)[:l] + return &nv + } + + return v +} + +func (m *metric) SetException(e metricException) { + switch te := e.(type) { + case awsException: + m.AWSException = aws.String(te.exception) + 
m.AWSExceptionMessage = aws.String(te.message) + case sdkException: + m.SDKException = aws.String(te.exception) + m.SDKExceptionMessage = aws.String(te.message) + } +} + +func (m *metric) SetFinalException(e metricException) { + switch te := e.(type) { + case awsException: + m.FinalAWSException = aws.String(te.exception) + m.FinalAWSExceptionMessage = aws.String(te.message) + case sdkException: + m.FinalSDKException = aws.String(te.exception) + m.FinalSDKExceptionMessage = aws.String(te.message) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go new file mode 100644 index 000000000..82a3e345e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -0,0 +1,55 @@ +package csm + +import ( + "sync/atomic" +) + +const ( + runningEnum = iota + pausedEnum +) + +var ( + // MetricsChannelSize of metrics to hold in the channel + MetricsChannelSize = 100 +) + +type metricChan struct { + ch chan metric + paused *int64 +} + +func newMetricChan(size int) metricChan { + return metricChan{ + ch: make(chan metric, size), + paused: new(int64), + } +} + +func (ch *metricChan) Pause() { + atomic.StoreInt64(ch.paused, pausedEnum) +} + +func (ch *metricChan) Continue() { + atomic.StoreInt64(ch.paused, runningEnum) +} + +func (ch *metricChan) IsPaused() bool { + v := atomic.LoadInt64(ch.paused) + return v == pausedEnum +} + +// Push will push metrics to the metric channel if the channel +// is not paused +func (ch *metricChan) Push(m metric) bool { + if ch.IsPaused() { + return false + } + + select { + case ch.ch <- m: + return true + default: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go new file mode 100644 index 000000000..54a99280c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go @@ -0,0 +1,26 @@ +package csm + +type metricException interface { + Exception() string + Message() string +} + +type requestException struct { + exception string + message string +} + +func (e requestException) Exception() string { + return e.exception +} +func (e requestException) Message() string { + return e.message +} + +type awsException struct { + requestException +} + +type sdkException struct { + requestException +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go new file mode 100644 index 000000000..835bcd49c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -0,0 +1,264 @@ +package csm + +import ( + "encoding/json" + "net" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Reporter will gather metrics of API requests made and +// send those metrics to the CSM endpoint. 
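+//
+// Reporter values are managed by this package; obtain one via csm.Start or
+// csm.Get rather than constructing it directly (see the package doc for
+// usage examples).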
+type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetException(getMetricException(awserr)) + } + } + + m.TruncateFields() + rep.metricsCh.Push(m) +} + +func getMetricException(err awserr.Error) metricException { + msg := err.Error() + code := err.Code() + + switch code { + case request.ErrCodeRequestError, + request.ErrCodeSerialization, + request.CanceledErrorCode: + return sdkException{ + requestException{exception: code, message: msg}, + } + default: + return awsException{ + requestException{exception: code, message: msg}, + } + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Region: r.Config.Region, + Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), + } + + if r.HTTPResponse != nil { + m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetFinalException(getMetricException(awserr)) + } + } + + m.TruncateFields() + + // TODO: Probably want to figure something out for logging dropped + // metrics + rep.metricsCh.Push(m) +} + +func (rep *Reporter) connect(network, url string) error { + if rep.conn != nil { + rep.conn.Close() + } + + conn, err := net.Dial(network, url) + if err != nil { + return awserr.New("UDPError", "Could not connect", err) + } + + rep.conn = conn + + return nil +} + +func (rep *Reporter) close() { + if rep.done != nil { + close(rep.done) + } + + rep.metricsCh.Pause() +} + +func (rep *Reporter) start() { + defer func() { + rep.metricsCh.Pause() + }() + + for { + select { + case <-rep.done: + rep.done = nil + return + case m := <-rep.metricsCh.ch: + // TODO: What to do with this error? 
Probably should just log
+			b, err := json.Marshal(m)
+			if err != nil {
+				continue
+			}
+
+			rep.conn.Write(b)
+		}
+	}
+}
+
+// Pause will pause the metric channel preventing any new metrics from being
+// added. It is safe to call concurrently with other calls to Pause, but if
+// called concurrently with Continue it can lead to unexpected state.
+func (rep *Reporter) Pause() {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if rep == nil {
+		return
+	}
+
+	rep.close()
+}
+
+// Continue will reopen the metric channel and allow for monitoring to be
+// resumed. It is safe to call concurrently with other calls to Continue, but
+// if called concurrently with Pause it can lead to unexpected state.
+func (rep *Reporter) Continue() {
+	lock.Lock()
+	defer lock.Unlock()
+	if rep == nil {
+		return
+	}
+
+	if !rep.metricsCh.IsPaused() {
+		return
+	}
+
+	rep.metricsCh.Continue()
+}
+
+// Client side metric handler names
+const (
+	APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
+	APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+)
+
+// InjectHandlers will enable client side metrics and inject the proper
+// handlers to handle how metrics are sent.
+//
+// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers
+// multiple times may lead to unexpected behavior (e.g. duplicate metrics).
+//
+//     // Start must be called in order to inject the correct handlers
+//     r, err := csm.Start("clientID", "127.0.0.1:8094")
+//     if err != nil {
+//         panic(fmt.Errorf("expected no error, but received %v", err))
+//     }
+//
+//     sess := session.Must(session.NewSession())
+//     r.InjectHandlers(&sess.Handlers)
+//
+//     // create a new service client with our client side metric session
+//     svc := s3.New(sess)
+func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
+	if rep == nil {
+		return
+	}
+
+	handlers.Complete.PushFrontNamed(request.NamedHandler{
+		Name: APICallMetricHandlerName,
+		Fn:   rep.sendAPICallMetric,
+	})
+
+	handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
+		Name: APICallAttemptMetricHandlerName,
+		Fn:   rep.sendAPICallAttemptMetric,
+	})
+}
+
+// boolIntValue returns 1 for true and 0 for false.
+func boolIntValue(b bool) int {
+	if b {
+		return 1
+	}
+
+	return 0
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 000000000..1ba80b576
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,252 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly, but session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
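+//
+// A minimal sketch of retrieving the defaults:
+//
+//     def := defaults.Get()
+//     cfg, handlers := def.Config, def.Handlers
+//     _, _ = cfg, handlers // pre-configured *aws.Config and request.Handlers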
+type Defaults struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+	cfg := Config()
+	handlers := Handlers()
+	cfg.Credentials = CredChain(cfg, handlers)
+
+	return Defaults{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the configuration of an existing service
+// client or session.
+func Config() *aws.Config {
+	return aws.NewConfig().
+		WithCredentials(credentials.AnonymousCredentials).
+		WithRegion(os.Getenv("AWS_REGION")).
+		WithHTTPClient(http.DefaultClient).
+		WithMaxRetries(aws.UseServiceDefaultRetries).
+		WithLogger(aws.NewDefaultLogger()).
+		WithLogLevel(aws.LogOff).
+		WithEndpointResolver(endpoints.DefaultResolver())
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the request handlers of an existing service
+// client or session.
+func Handlers() request.Handlers {
+	var handlers request.Handlers
+
+	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+	handlers.Build.PushBackNamed(corehandlers.AddAwsInternal)
+	handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
+	handlers.Build.AfterEachFn = request.HandlerListStopOnError
+	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
+	handlers.Send.PushBackNamed(corehandlers.SendHandler)
+	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+	handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+	return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the credentials of an existing service
+// client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+	return credentials.NewCredentials(&credentials.ChainProvider{
+		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+		Providers:     CredProviders(cfg, handlers),
+	})
+}
+
+// CredProviders returns the slice of providers used in
+// the default credential chain.
+//
+// This is useful for applications that need to use some other provider (for
+// example, different environment variables for legacy reasons) but still fall
+// back on the default chain of providers.
+// This allows the default chain to be automatically updated.
+func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
+	return []credentials.Provider{
+		&credentials.EnvProvider{},
+		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+		RemoteCredProvider(*cfg, handlers),
+	}
+}
+
+const (
+	httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+	httpProviderAuthFileEnvVar      = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE"
+	httpProviderEnvVar              = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+)
+
+// direct representation of the IPv4 address for the ECS container
+// "169.254.170.2"
+var ecsContainerIPv4 net.IP = []byte{
+	169, 254, 170, 2,
+}
+
+// direct representation of the IPv4 address for the EKS container
+// "169.254.170.23"
+var eksContainerIPv4 net.IP = []byte{
+	169, 254, 170, 23,
+}
+
+// direct representation of the IPv6 address for the EKS container
+// "fd00:ec2::23"
+var eksContainerIPv6 net.IP = []byte{
+	0xFD, 0, 0xE, 0xC2,
+	0, 0, 0, 0,
+	0, 0, 0, 0,
+	0, 0, 0, 0x23,
+}
+
+// RemoteCredProvider returns a credentials provider for the default remote
+// endpoints such as EC2 or ECS Roles.
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+	if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
+		return localHTTPCredProvider(cfg, handlers, u)
+	}
+
+	if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 {
+		u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri)
+		return httpCredProvider(cfg, handlers, u)
+	}
+
+	return ec2RoleProvider(cfg, handlers)
+}
+
+var lookupHostFn = net.LookupHost
+
+// isAllowedHost allows host to be loopback or known ECS/EKS container IPs.
+//
+// host can either be an IP address OR an unresolved hostname - resolution will
+// be automatically performed in the latter case.
+func isAllowedHost(host string) (bool, error) {
+	if ip := net.ParseIP(host); ip != nil {
+		return isIPAllowed(ip), nil
+	}
+
+	addrs, err := lookupHostFn(host)
+	if err != nil {
+		return false, err
+	}
+
+	for _, addr := range addrs {
+		if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+func isIPAllowed(ip net.IP) bool {
+	return ip.IsLoopback() ||
+		ip.Equal(ecsContainerIPv4) ||
+		ip.Equal(eksContainerIPv4) ||
+		ip.Equal(eksContainerIPv6)
+}
+
+func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+	var errMsg string
+
+	parsed, err := url.Parse(u)
+	if err != nil {
+		errMsg = fmt.Sprintf("invalid URL, %v", err)
+	} else {
+		host := aws.URLHostname(parsed)
+		if len(host) == 0 {
+			errMsg = "unable to parse host from local HTTP cred provider URL"
+		} else if parsed.Scheme == "http" {
+			if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil {
+				errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, allowHostErr)
+			} else if !isAllowedHost {
+				errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed.", host)
+			}
+		}
+	}
+
+	if len(errMsg) > 0 {
+		if cfg.Logger != nil {
+			cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
+		}
+		return credentials.ErrorProvider{
+			Err:          awserr.New("CredentialsEndpointError", errMsg, err),
+			ProviderName: endpointcreds.ProviderName,
+		}
+	}
+
+	return httpCredProvider(cfg, handlers, u)
+}
+
+func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+	return endpointcreds.NewProviderClient(cfg, handlers, u,
+		func(p *endpointcreds.Provider) {
+			p.ExpiryWindow = 5 * time.Minute
+			p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
+			if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" {
+				p.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) {
+					if contents, err := ioutil.ReadFile(authFilePath); err != nil {
+						return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err)
+					} else {
+						return string(contents), nil
+					}
+				})
+			}
+		},
+	)
+}
+
+func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+	resolver := cfg.EndpointResolver
+	if resolver == nil {
+		resolver = endpoints.DefaultResolver()
+	}
+
+	e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
+	return &ec2rolecreds.EC2RoleProvider{
+		Client:       ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
+		ExpiryWindow: 5 * time.Minute,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
new file mode 100644
index 000000000..ca0ee1dcc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
@@ -0,0 +1,27 @@
+package defaults
+
+import (
+	"github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+//   - Linux/Unix: $HOME/.aws/credentials
+//   - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+	return shareddefaults.SharedCredentialsFilename()
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+//   - Linux/Unix: $HOME/.aws/config
+//   - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+	return shareddefaults.SharedConfigFilename()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
new file mode 100644
index 000000000..4fcb61618
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operations parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// APIs use. These utilities make getting a pointer of the scalar, and dereferencing
+// a pointer easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to value will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The value to pointer functions will be named after the scalar type. So to get a
+// *string from a string value use the "String" function. This makes it easy
+// to get a pointer to a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+// +// var strPtr *string +// +// // Without the SDK's conversion functions +// str := "my string" +// strPtr = &str +// +// // With the SDK's conversion functions +// strPtr = aws.String("my string") +// +// // Convert *string to string value +// str = aws.StringValue(strPtr) +// +// In addition to scalars, the aws package also includes conversion utilities for +// maps and slices of commonly used types in API parameters. The map and slice +// conversion functions use a similar naming pattern to the scalar conversion +// functions. +// +// var strPtrs []*string +// var strs []string = []string{"Go", "Gophers", "Go"} +// +// // Convert []string to []*string +// strPtrs = aws.StringSlice(strs) +// +// // Convert []*string to []string +// strs = aws.StringValueSlice(strPtrs) +// +// SDK Default HTTP Client +// +// The SDK will use the http.DefaultClient if an HTTP client is not provided to +// the SDK's Session or service client constructors. This means that if the +// http.DefaultClient is modified by other components of your application, the +// modifications will be picked up by the SDK as well. +// +// In some cases this might be intended, but it is a better practice to create +// a custom HTTP Client to share explicitly through your application. You can +// configure the SDK to use the custom HTTP Client by setting the HTTPClient +// value of the SDK's Config type when creating a Session or service client. +package aws diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go new file mode 100644 index 000000000..69fa63dc0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -0,0 +1,250 @@ +package ec2metadata + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// getToken uses the duration to return a token for the EC2 metadata service, +// or an error if the request failed. +func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) { + op := &request.Operation{ + Name: "GetToken", + HTTPMethod: "PUT", + HTTPPath: "/latest/api/token", + } + + var output tokenOutput + req := c.NewRequest(op, nil, &output) + req.SetContext(ctx) + + // remove the fetch token handler from the request handlers to avoid infinite recursion + req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) + + // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request. + req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler) + + ttl := strconv.FormatInt(int64(duration/time.Second), 10) + req.HTTPRequest.Header.Set(ttlHeader, ttl) + + err := req.Send() + + // Errors with bad request status should be returned. + if err != nil { + err = awserr.NewRequestFailure( + awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err), + req.HTTPResponse.StatusCode, req.RequestID) + } + + return output, err +} + +// GetMetadata uses the path provided to request information from the EC2 +// instance metadata service. The content will be returned as a string, or an +// error if the request failed. +func (c *EC2Metadata) GetMetadata(p string) (string, error) { + return c.GetMetadataWithContext(aws.BackgroundContext(), p) +} + +// GetMetadataWithContext uses the path provided to request information from the EC2 +// instance metadata service.
The content will be returned as a string, or an +// error if the request failed. +func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/latest/meta-data", p), + } + output := &metadataOutput{} + + req := c.NewRequest(op, nil, output) + + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetUserData returns the userdata that was configured for the EC2 instance. If +// there is no user-data set up for the EC2 instance, a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserData() (string, error) { + return c.GetUserDataWithContext(aws.BackgroundContext()) +} + +// GetUserDataWithContext returns the userdata that was configured for the EC2 instance. If +// there is no user-data set up for the EC2 instance, a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) { + op := &request.Operation{ + Name: "GetUserData", + HTTPMethod: "GET", + HTTPPath: "/latest/user-data", + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or an error if the request failed. +func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + return c.GetDynamicDataWithContext(aws.BackgroundContext(), p) +} + +// GetDynamicDataWithContext uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or an error if the request failed. +func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) { + op := &request.Operation{ + Name: "GetDynamicData", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/latest/dynamic", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. An error is returned if the request fails or the response +// cannot be parsed. +func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext()) +} + +// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an +// instance. An error is returned if the request fails or the response +// cannot be parsed.
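+//
+// A minimal usage sketch (assuming an already constructed client svc; the
+// names here are illustrative, not part of the upstream API surface):
+//
+// doc, err := svc.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext())
+// if err == nil {
+// fmt.Println(doc.Region, doc.InstanceID)
+// }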
+func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// IAMInfo retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { + return c.IAMInfoWithContext(aws.BackgroundContext()) +} + +// IAMInfoWithContext retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) { + resp, err := c.GetMetadataWithContext(ctx, "iam/info") + if err != nil { + return EC2IAMInfo{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 IAM info", err) + } + + info := EC2IAMInfo{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { + return EC2IAMInfo{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 IAM info", err) + } + + if info.Code != "Success" { + errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) + return EC2IAMInfo{}, + awserr.New("EC2MetadataError", errMsg, nil) + } + + return info, nil +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + return c.RegionWithContext(aws.BackgroundContext()) +} + +// RegionWithContext returns the region the instance is running in. +func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) { + ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx) + if err != nil { + return "", err + } + // extract region from the ec2InstanceIdentityDocument + region := ec2InstanceIdentityDocument.Region + if len(region) == 0 { + return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil) + } + // return the region + return region, nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if the application is running within an EC2 instance and +// the metadata service is available. +func (c *EC2Metadata) Available() bool { + return c.AvailableWithContext(aws.BackgroundContext()) +} + +// AvailableWithContext returns if the application has access to the EC2 Metadata service. +// Can be used to determine if the application is running within an EC2 instance and +// the metadata service is available.
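+//
+// For example (a sketch, assuming an existing client svc), this can gate a
+// region lookup so that non-EC2 environments fall back to other sources:
+//
+// if svc.AvailableWithContext(aws.BackgroundContext()) {
+// region, _ := svc.RegionWithContext(aws.BackgroundContext())
+// fmt.Println("running on EC2 in", region)
+// }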
+func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool { + if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil { + return false + } + + return true +} + +// An EC2IAMInfo provides the shape for unmarshaling +// an IAM info from the metadata API +type EC2IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} + +// An EC2InstanceIdentityDocument provides the shape for unmarshaling +// an instance identity document +type EC2InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go new file mode 100644 index 000000000..f4cc8751d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -0,0 +1,245 @@ +// Package ec2metadata provides the client for making API calls to the +// EC2 Metadata service. +// +// This package's client can be disabled completely by setting the environment +// variable "AWS_EC2_METADATA_DISABLED=true". Setting this environment variable +// to true (case insensitive) instructs the SDK to disable the EC2 Metadata +// client, and the client cannot be used while it is set. +// +// The endpoint of the EC2 IMDS client can be configured via the environment +// variable AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a +// Session. See aws/session#Options.EC2IMDSEndpoint for more details. +package ec2metadata + +import ( + "bytes" + "io" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // ServiceName is the name of the service. + ServiceName = "ec2metadata" + disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" + + // Headers for Token and TTL + ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds" + tokenHeader = "x-aws-ec2-metadata-token" + + // Named Handler constants + fetchTokenHandlerName = "FetchTokenHandler" + unmarshalMetadataHandlerName = "unmarshalMetadataHandler" + unmarshalTokenHandlerName = "unmarshalTokenHandler" + enableTokenProviderHandlerName = "enableTokenProviderHandler" + + // TTL constants + defaultTTL = 21600 * time.Second + ttlExpirationWindow = 30 * time.Second +) + +// An EC2Metadata is an EC2 Metadata service Client. +type EC2Metadata struct { + *client.Client +} + +// New creates a new instance of the EC2Metadata client with a session. +// This client is safe to use across multiple goroutines. +// +// Example: +// +// // Create an EC2Metadata client from just a session.
+// svc := ec2metadata.New(mySession) +// +// // Create an EC2Metadata client with additional configuration +// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { + c := p.ClientConfig(ServiceName, cfgs...) + return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// NewClient returns a new EC2Metadata client. Should be used to create +// a client when not using a session. Generally using just New with a session +// is preferred. +// +// Will remove the URL path from the endpoint provided to ensure the EC2 IMDS +// client is able to communicate with the EC2 IMDS API. +// +// If an unmodified HTTP client from the stdlib default, or no client at all, +// is provided, the EC2Metadata HTTP client's timeout will be shortened. +// To disable this behavior, set Config.EC2MetadataDisableTimeoutOverride to +// true; the override is enabled by default. +func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { + if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { + // If the http client is unmodified and this feature is not disabled + // set custom timeouts for EC2Metadata requests. + cfg.HTTPClient = &http.Client{ + // use a shorter timeout than default because the metadata + // service is local if it is running, and to fail faster + // if not running on an ec2 instance. + Timeout: 1 * time.Second, + } + // max number of retries on the client operation + cfg.MaxRetries = aws.Int(2) + } + + if u, err := url.Parse(endpoint); err == nil { + // Remove path from the endpoint since it will be added by requests. + // This is an artifact of the SDK adding `/latest` to the endpoint for + // EC2 IMDS, but this is now moved to the operation definition. + u.Path = "" + u.RawPath = "" + endpoint = u.String() + } + + svc := &EC2Metadata{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceName, + Endpoint: endpoint, + APIVersion: "latest", + }, + handlers, + ), + } + + // token provider instance + tp := newTokenProvider(svc, defaultTTL) + + // NamedHandler for fetching token + svc.Handlers.Sign.PushBackNamed(request.NamedHandler{ + Name: fetchTokenHandlerName, + Fn: tp.fetchTokenHandler, + }) + // NamedHandler for enabling token provider + svc.Handlers.Complete.PushBackNamed(request.NamedHandler{ + Name: enableTokenProviderHandlerName, + Fn: tp.enableTokenProviderHandler, + }) + + svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler) + svc.Handlers.UnmarshalError.PushBack(unmarshalError) + svc.Handlers.Validate.Clear() + svc.Handlers.Validate.PushBack(validateEndpointHandler) + + // Disable the EC2 Metadata service if the environment variable is set. + // This short-circuits the service's functionality to always fail to send + // requests.
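+ // For example, exporting AWS_EC2_METADATA_DISABLED=true makes every
+ // request sent through this client fail immediately with
+ // request.CanceledErrorCode instead of touching the network.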
+ if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { + svc.Handlers.Send.SwapNamed(request.NamedHandler{ + Name: corehandlers.SendHandler.Name, + Fn: func(r *request.Request) { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + } + r.Error = awserr.New( + request.CanceledErrorCode, + "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", + nil) + }, + }) + } + + // Add additional options to the service config + for _, option := range opts { + option(svc.Client) + } + return svc +} + +func httpClientZero(c *http.Client) bool { + return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) +} + +type metadataOutput struct { + Content string +} + +type tokenOutput struct { + Token string + TTL time.Duration +} + +// unmarshal token handler is used to parse the response of a getToken operation +var unmarshalTokenHandler = request.NamedHandler{ + Name: unmarshalTokenHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + v := r.HTTPResponse.Header.Get(ttlHeader) + data, ok := r.Data.(*tokenOutput) + if !ok { + return + } + + data.Token = b.String() + // TTL is in seconds + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode, + "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + t := time.Duration(i) * time.Second + data.TTL = t + }, +} + +var unmarshalHandler = request.NamedHandler{ + Name: unmarshalMetadataHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } + }, +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err), + r.HTTPResponse.StatusCode, r.RequestID) + return + } + + // Response body format is not consistent between metadata endpoints. 
+ // Grab the error message as a string and include that as the source error + r.Error = awserr.NewRequestFailure( + awserr.New("EC2MetadataError", "failed to make EC2Metadata request\n"+b.String(), nil), + r.HTTPResponse.StatusCode, r.RequestID) +} + +func validateEndpointHandler(r *request.Request) { + if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go new file mode 100644 index 000000000..f1f9ba4ec --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go @@ -0,0 +1,99 @@ +package ec2metadata + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" + "net/http" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A tokenProvider provides access to the EC2Metadata client +// and an atomic instance of a token, along with its configuredTTL. +// tokenProvider also provides an atomic flag to disable the +// fetch token operation. +// The disabled member uses 0 as false, and 1 as true. +type tokenProvider struct { + client *EC2Metadata + token atomic.Value + configuredTTL time.Duration + disabled uint32 +} + +// An ec2Token struct helps with use of the token in EC2 Metadata service ops +type ec2Token struct { + token string + credentials.Expiry +} + +// newTokenProvider returns a pointer to a tokenProvider instance +func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider { + return &tokenProvider{client: c, configuredTTL: duration} +} + +// check if fallback is enabled +func (t *tokenProvider) fallbackEnabled() bool { + return t.client.Config.EC2MetadataEnableFallback == nil || *t.client.Config.EC2MetadataEnableFallback +} + +// fetchTokenHandler fetches a token for the EC2Metadata service client by default. +func (t *tokenProvider) fetchTokenHandler(r *request.Request) { + // short-circuit to the insecure data flow if the tokenProvider is disabled. + if v := atomic.LoadUint32(&t.disabled); v == 1 && t.fallbackEnabled() { + return + } + + if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + return + } + + output, err := t.client.getToken(r.Context(), t.configuredTTL) + + if err != nil { + // only attempt fallback to insecure data flow if IMDSv1 is enabled + if !t.fallbackEnabled() { + r.Error = awserr.New("EC2MetadataError", "failed to get IMDSv2 token and fallback to IMDSv1 is disabled", err) + return + } + + // change the disabled flag on token provider to true and fallback + if requestFailureError, ok := err.(awserr.RequestFailure); ok { + switch requestFailureError.StatusCode() { + case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: + atomic.StoreUint32(&t.disabled, 1) + if t.client.Config.LogLevel.Matches(aws.LogDebugWithDeprecated) { + t.client.Config.Logger.Log(fmt.Sprintf("WARN: failed to get session token, falling back to IMDSv1: %v", requestFailureError)) + } + case http.StatusBadRequest: + r.Error = requestFailureError + } + } + return + } + + newToken := ec2Token{ + token: output.Token, + } + newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow) + t.token.Store(newToken) + + // Inject the token header into the request.
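+ // The token is re-read via Load rather than used from newToken directly,
+ // so that a concurrently stored value wins and all goroutines observe the
+ // same atomically published token.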
+ if ec2Token, ok := t.token.Load().(ec2Token); ok { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + } +} + +// enableTokenProviderHandler enables the token provider +func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) { + // If the error code status is 401, we enable the token provider + if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil && + e.StatusCode() == http.StatusUnauthorized { + t.token.Store(ec2Token{}) + atomic.StoreUint32(&t.disabled, 0) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go new file mode 100644 index 000000000..cad3b9a48 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -0,0 +1,193 @@ +package endpoints + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +type modelDefinition map[string]json.RawMessage + +// DecodeModelOptions are the options for how the endpoints model definition +// is decoded. +type DecodeModelOptions struct { + SkipCustomizations bool +} + +// Set combines all of the option functions together. +func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// DecodeModel unmarshals a Regions and Endpoint model definition file into +// an endpoint Resolver. If the file format is not supported, or an error occurs +// when unmarshaling the model, an error will be returned. +// +// Casting the return value of this func to an EnumPartitions will +// allow you to get a list of the partitions in the order the endpoints +// will be resolved in. +// +// resolver, err := endpoints.DecodeModel(reader) +// +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// for _, p := range partitions { +// // ... inspect partitions +// } +func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { + var opts DecodeModelOptions + opts.Set(optFns...) + + // Get the version of the partition file to determine what + // unmarshaling model to use. + modelDef := modelDefinition{} + if err := json.NewDecoder(r).Decode(&modelDef); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + var version string + if b, ok := modelDef["version"]; ok { + version = string(b) + } else { + return nil, newDecodeModelError("endpoints version not found in model", nil) + } + + if version == "3" { + return decodeV3Endpoints(modelDef, opts) + } + + return nil, newDecodeModelError( + fmt.Sprintf("endpoints version %s, not supported", version), nil) +} + +func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { + b, ok := modelDef["partitions"] + if !ok { + return nil, newDecodeModelError("endpoints model missing partitions", nil) + } + + ps := partitions{} + if err := json.Unmarshal(b, &ps); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + if opts.SkipCustomizations { + return ps, nil + } + + // Customization + for i := 0; i < len(ps); i++ { + p := &ps[i] + custRegionalS3(p) + custRmIotDataService(p) + custFixAppAutoscalingChina(p) + custFixAppAutoscalingUsGov(p) + } + + return ps, nil +} + +func custRegionalS3(p *partition) { + if p.ID != "aws" { + return + } + + service, ok := p.Services["s3"] + if !ok { + return + } + + const awsGlobal = "aws-global" + const usEast1 = "us-east-1" + + // If the global endpoint already exists, no customization is needed.
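+ // Otherwise promote us-east-1 to back an aws-global partition endpoint,
+ // so the partition endpoint resolves to s3.amazonaws.com with a
+ // us-east-1 credential scope.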
+ if _, ok := service.Endpoints[endpointKey{Region: awsGlobal}]; ok { + return + } + + service.PartitionEndpoint = awsGlobal + if _, ok := service.Endpoints[endpointKey{Region: usEast1}]; !ok { + service.Endpoints[endpointKey{Region: usEast1}] = endpoint{} + } + service.Endpoints[endpointKey{Region: awsGlobal}] = endpoint{ + Hostname: "s3.amazonaws.com", + CredentialScope: credentialScope{ + Region: usEast1, + }, + } + + p.Services["s3"] = service +} + +func custRmIotDataService(p *partition) { + delete(p.Services, "data.iot") +} + +func custFixAppAutoscalingChina(p *partition) { + if p.ID != "aws-cn" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + const expectHostname = `autoscaling.{region}.amazonaws.com` + serviceDefault := s.Defaults[defaultKey{}] + if e, a := expectHostname, serviceDefault.Hostname; e != a { + fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) + return + } + serviceDefault.Hostname = expectHostname + ".cn" + s.Defaults[defaultKey{}] = serviceDefault + p.Services[serviceName] = s +} + +func custFixAppAutoscalingUsGov(p *partition) { + if p.ID != "aws-us-gov" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + serviceDefault := s.Defaults[defaultKey{}] + if a := serviceDefault.CredentialScope.Service; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) + return + } + + if a := serviceDefault.Hostname; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a) + return + } + + serviceDefault.CredentialScope.Service = "application-autoscaling" + serviceDefault.Hostname = "autoscaling.{region}.amazonaws.com" + + if s.Defaults == nil { + s.Defaults = make(endpointDefaults) + } + + s.Defaults[defaultKey{}] = serviceDefault + + p.Services[serviceName] = s +} + +type decodeModelError struct { + awsError +} + +func newDecodeModelError(msg string, err error) decodeModelError { + return decodeModelError{ + awsError: awserr.New("DecodeEndpointsModelError", msg, err), + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go new file mode 100644 index 000000000..c3516e018 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -0,0 +1,48609 @@ +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + +// Partition identifiers +const ( + AwsPartitionID = "aws" // AWS Standard partition. + AwsCnPartitionID = "aws-cn" // AWS China partition. + AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. + AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. + AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. + AwsIsoEPartitionID = "aws-iso-e" // AWS ISOE (Europe) partition. + AwsIsoFPartitionID = "aws-iso-f" // AWS ISOF partition. +) + +// AWS Standard partition's regions. +const ( + AfSouth1RegionID = "af-south-1" // Africa (Cape Town). + ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). + ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). + ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka). + ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). 
+ ApSouth2RegionID = "ap-south-2" // Asia Pacific (Hyderabad). + ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). + ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). + ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta). + ApSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne). + CaCentral1RegionID = "ca-central-1" // Canada (Central). + CaWest1RegionID = "ca-west-1" // Canada West (Calgary). + EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). + EuCentral2RegionID = "eu-central-2" // Europe (Zurich). + EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). + EuSouth1RegionID = "eu-south-1" // Europe (Milan). + EuSouth2RegionID = "eu-south-2" // Europe (Spain). + EuWest1RegionID = "eu-west-1" // Europe (Ireland). + EuWest2RegionID = "eu-west-2" // Europe (London). + EuWest3RegionID = "eu-west-3" // Europe (Paris). + IlCentral1RegionID = "il-central-1" // Israel (Tel Aviv). + MeCentral1RegionID = "me-central-1" // Middle East (UAE). + MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). + SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). + UsEast1RegionID = "us-east-1" // US East (N. Virginia). + UsEast2RegionID = "us-east-2" // US East (Ohio). + UsWest1RegionID = "us-west-1" // US West (N. California). + UsWest2RegionID = "us-west-2" // US West (Oregon). +) + +// AWS China partition's regions. +const ( + CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). +) + +// AWS GovCloud (US) partition's regions. +const ( + UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). +) + +// AWS ISO (US) partition's regions. +const ( + UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. + UsIsoWest1RegionID = "us-iso-west-1" // US ISO WEST. +) + +// AWS ISOB (US) partition's regions. +const ( + UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). +) + +// AWS ISOE (Europe) partition's regions. +const ( + EuIsoeWest1RegionID = "eu-isoe-west-1" // EU ISOE West. +) + +// AWS ISOF partition's regions. +const () + +// DefaultResolver returns an Endpoint resolver that will be able +// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF. +// +// Use DefaultPartitions() to get the list of the default partitions. +func DefaultResolver() Resolver { + return defaultPartitions +} + +// DefaultPartitions returns a list of the partitions the SDK is bundled +// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF. +// +// partitions := endpoints.DefaultPartitions +// for _, p := range partitions { +// // ... inspect partitions +// } +func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() +} + +var defaultPartitions = partitions{ + awsPartition, + awscnPartition, + awsusgovPartition, + awsisoPartition, + awsisobPartition, + awsisoePartition, + awsisofPartition, +} + +// AwsPartition returns the Resolver for AWS Standard. 
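+//
+// A short sketch of resolving an endpoint through this partition (the
+// service and region names here are only examples):
+//
+// p := endpoints.AwsPartition()
+// ep, err := p.EndpointFor("s3", "us-west-2")
+// if err == nil {
+// fmt.Println(ep.URL) // e.g. https://s3.us-west-2.amazonaws.com
+// }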
+func AwsPartition() Partition { + return awsPartition.Partition() +} + +var awsPartition = partition{ + ID: "aws", + Name: "AWS Standard", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{ + "af-south-1": region{ + Description: "Africa (Cape Town)", + }, + "ap-east-1": region{ + Description: "Asia Pacific (Hong Kong)", + }, + "ap-northeast-1": region{ + Description: "Asia Pacific (Tokyo)", + }, + "ap-northeast-2": region{ + Description: "Asia Pacific (Seoul)", + }, + "ap-northeast-3": region{ + Description: "Asia Pacific (Osaka)", + }, + "ap-south-1": region{ + Description: "Asia Pacific (Mumbai)", + }, + "ap-south-2": region{ + Description: "Asia Pacific (Hyderabad)", + }, + "ap-southeast-1": region{ + Description: "Asia Pacific (Singapore)", + }, + "ap-southeast-2": region{ + Description: "Asia Pacific (Sydney)", + }, + "ap-southeast-3": region{ + Description: "Asia Pacific (Jakarta)", + }, + "ap-southeast-4": region{ + Description: "Asia Pacific (Melbourne)", + }, + "ca-central-1": region{ + Description: "Canada (Central)", + }, + "ca-west-1": region{ + Description: "Canada West (Calgary)", + }, + "eu-central-1": region{ + Description: "Europe (Frankfurt)", + }, + "eu-central-2": region{ + Description: "Europe (Zurich)", + }, + "eu-north-1": region{ + Description: "Europe (Stockholm)", + }, + "eu-south-1": region{ + Description: "Europe (Milan)", + }, + "eu-south-2": region{ + Description: "Europe (Spain)", + }, + "eu-west-1": region{ + Description: "Europe (Ireland)", + }, + "eu-west-2": region{ + Description: "Europe (London)", + }, + "eu-west-3": region{ + Description: "Europe (Paris)", + }, + "il-central-1": region{ + Description: "Israel (Tel Aviv)", + }, + "me-central-1": region{ + Description: "Middle East (UAE)", + }, + "me-south-1": region{ + Description: "Middle East (Bahrain)", + }, + "sa-east-1": region{ + Description: "South America (Sao Paulo)", + }, + "us-east-1": region{ + Description: "US East (N. Virginia)", + }, + "us-east-2": region{ + Description: "US East (Ohio)", + }, + "us-west-1": region{ + Description: "US West (N. 
California)", + }, + "us-west-2": region{ + Description: "US West (Oregon)", + }, + }, + Services: services{ + "access-analyzer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"access-analyzer-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", + }, + }, + }, + "account": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "account.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "acm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "acm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "acm-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "acm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: 
"acm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "acm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "acm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "acm-pca": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "acm-pca-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "acm-pca-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "acm-pca-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "acm-pca-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: 
boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "acm-pca-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-west-2.amazonaws.com", + }, + }, + }, + "agreement-marketplace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "airflow": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "amplify": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: 
"ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "amplifybackend": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "amplifyuibuilder": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "aoss": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: 
"eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "api.detective": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "api.ecr": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"ecr-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "api.ecr.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "api.ecr.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.ecr.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "api.ecr.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "api.ecr.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "api.ecr.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "api.ecr.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "api.ecr.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "api.ecr.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "api.ecr.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "api.ecr.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "api.ecr.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "api.ecr.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "dkr-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"dkr-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "api.ecr.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "api.ecr.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "api.ecr.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "api.ecr.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "api.ecr.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.ecr.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "api.ecr.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "api.ecr.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "fips-dkr-us-east-1", + }: endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-dkr-us-east-2", + }: endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-dkr-us-west-1", + }: endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-dkr-us-west-2", + }: endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "api.ecr.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: 
"me-central-1", + }: endpoint{ + Hostname: "api.ecr.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "api.ecr.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "api.ecr.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.ecr.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "api.ecr.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "api.ecr.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.ecr.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.ecr-public": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.ecr-public.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.ecr-public.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.elastic-inference": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.elastic-inference.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "api.elastic-inference.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.elastic-inference.us-west-2.amazonaws.com", + }, + }, + }, + "api.fleethub.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + 
endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "api.iotdeviceadvisor": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.iotwireless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "api.iotwireless.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.iotwireless.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: 
"sa-east-1", + }: endpoint{ + Hostname: "api.iotwireless.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.iotwireless.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.iotwireless.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.mediatailor": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "api.sagemaker": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + 
Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "api.tunneling.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"fips-us-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "apigateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "apigateway-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "apigateway-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "apigateway-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + 
endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "apigateway-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "apigateway-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "apigateway-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-west-2.amazonaws.com", + }, + }, + }, + "app-integrations": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + 
Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appflow": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "appflow-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "appflow-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "appflow-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: 
"appflow-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-west-2.amazonaws.com", + }, + }, + }, + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "applicationinsights": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: 
endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appmesh": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appmesh-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "appmesh-fips.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "appmesh-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + 
endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "appmesh-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "appmesh-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "appmesh-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: 
endpoint{ + Hostname: "appmesh.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "appmesh-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "apprunner": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "apprunner-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "apprunner-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "apprunner-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-west-2.amazonaws.com", + }, + }, + }, + "appstream2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: 
"appstream2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "appsync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "aps": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: 
endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-4.api.aws", + }, + 
endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "athena-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "athena-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "athena-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "athena-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + 
Hostname: "athena.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-2.api.aws", + }, + }, + }, + "auditmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "autoscaling-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "autoscaling-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "autoscaling-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "autoscaling-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: 
+				}: endpoint{
+					Hostname: "autoscaling-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "autoscaling-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "autoscaling-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "autoscaling-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"autoscaling-plans": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"http", "https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"backup": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "backup-gateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "batch": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fips.batch.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fips.batch.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fips.batch.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, 
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "fips.batch.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "fips.batch.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "fips.batch.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "fips.batch.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "fips.batch.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"bedrock": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "bedrock-ap-northeast-1",
+				}: endpoint{
+					Hostname: "bedrock.ap-northeast-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-1",
+					},
+				},
+				endpointKey{
+					Region: "bedrock-ap-south-1",
+				}: endpoint{
+					Hostname: "bedrock.ap-south-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-south-1",
+					},
+				},
+				endpointKey{
+					Region: "bedrock-ap-southeast-1",
+				}: endpoint{
+					Hostname: "bedrock.ap-southeast-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-1",
+					},
+				},
+				endpointKey{
+					Region: "bedrock-ap-southeast-2",
+				}: endpoint{
+					Hostname: "bedrock.ap-southeast-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-2",
+					},
+				},
+				endpointKey{
+					Region: "bedrock-ca-central-1",
+				}: endpoint{
+					Hostname: "bedrock.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+				},
+				endpointKey{
+					Region: "bedrock-eu-central-1",
+				}: endpoint{
+					Hostname: "bedrock.eu-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-central-1",
+					},
+				},
+				endpointKey{
+					Region: "bedrock-eu-west-1",
+				}: endpoint{
+					Hostname: "bedrock.eu-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-1",
+					},
+				},
+				endpointKey{
+					Region: "bedrock-eu-west-2",
+				}: endpoint{
+					Hostname: "bedrock.eu-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-2",
+					},
+				},
+				endpointKey{
+					Region: "bedrock-eu-west-3",
+				}: endpoint{
+					Hostname: "bedrock.eu-west-3.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-3",
+					},
+				},
+				endpointKey{
+					Region: "bedrock-fips-ca-central-1",
+				}: endpoint{
+					Hostname: "bedrock-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+				},
+				endpointKey{
+					Region: "bedrock-fips-us-east-1",
+				}: endpoint{
+					Hostname: "bedrock-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region: "bedrock-fips-us-west-2",
"bedrock-fips-us-west-2", + }: endpoint{ + Hostname: "bedrock-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ap-northeast-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ap-south-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ap-southeast-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ap-southeast-2", + }: endpoint{ + Hostname: "bedrock-runtime.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ca-central-1", + }: endpoint{ + Hostname: "bedrock-runtime.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-eu-central-1", + }: endpoint{ + Hostname: "bedrock-runtime.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-eu-west-1", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-eu-west-2", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-eu-west-3", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-ca-central-1", + }: endpoint{ + Hostname: "bedrock-runtime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-us-east-1", + }: endpoint{ + Hostname: "bedrock-runtime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-us-west-2", + }: endpoint{ + Hostname: "bedrock-runtime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-sa-east-1", + }: endpoint{ + Hostname: "bedrock-runtime.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-us-east-1", + }: endpoint{ + Hostname: "bedrock-runtime.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-us-west-2", + }: endpoint{ + Hostname: "bedrock-runtime.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "bedrock-sa-east-1", + }: endpoint{ + Hostname: "bedrock.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "bedrock-us-east-1", + }: endpoint{ + Hostname: "bedrock.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: 
"bedrock-us-west-2", + }: endpoint{ + Hostname: "bedrock.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "billingconductor": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "billingconductor.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "braket": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cases": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "cassandra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cassandra-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cassandra-fips.us-west-2.amazonaws.com", + CredentialScope: 
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cassandra-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cassandra-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"catalog.marketplace": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+			},
+		},
+		"ce": service{
+			PartitionEndpoint: "aws-global",
+			IsRegionalized: boxedFalse,
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "aws-global",
+				}: endpoint{
+					Hostname: "ce.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+			},
+		},
+		"chime": service{
+			PartitionEndpoint: "aws-global",
+			IsRegionalized: boxedFalse,
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "aws-global",
+				}: endpoint{
+					Hostname: "chime.us-east-1.amazonaws.com",
+					Protocols: []string{"https"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+			},
+		},
+		"cleanrooms": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"cloud9": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "af-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloud9-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "cloud9-fips.ca-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+					Hostname: "cloud9-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "cloud9-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "cloud9-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "cloud9-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "cloud9-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "il-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloud9-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "cloud9-fips.us-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloud9-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "cloud9-fips.us-east-2.api.aws",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloud9-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "cloud9-fips.us-west-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloud9-fips.us-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "cloud9-fips.us-west-2.api.aws",
+				},
+			},
+		},
+		"cloudcontrolapi": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "af-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.af-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.ap-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.ap-northeast-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.ap-northeast-2.api.aws",
+				},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.ap-northeast-3.api.aws",
+				},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.ap-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.ap-south-2.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.ap-southeast-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.ap-southeast-2.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.ap-southeast-3.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.ap-southeast-4.api.aws",
+				},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.ca-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi-fips.ca-central-1.api.aws",
+				},
+				endpointKey{
Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: 
+				endpointKey{
+					Region: "me-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.me-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.me-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.sa-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.us-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi-fips.us-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.us-east-2.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi-fips.us-east-2.api.aws",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.us-west-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi-fips.us-west-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi.us-west-2.api.aws",
+				},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi-fips.us-west-2.api.aws",
+				},
+			},
+		},
+		"clouddirectory": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"cloudformation": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloudformation-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1-fips",
+				}: endpoint{
+					Hostname: "cloudformation-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloudformation-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2-fips",
+				}: endpoint{
+					Hostname: "cloudformation-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloudformation-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1-fips",
+				}: endpoint{
+					Hostname: "cloudformation-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloudformation-fips.us-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2-fips",
+				}: endpoint{
+					Hostname: "cloudformation-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"cloudfront": service{
+			PartitionEndpoint: "aws-global",
+			IsRegionalized: boxedFalse,
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "aws-global",
+				}: endpoint{
+					Hostname: "cloudfront.amazonaws.com",
+					Protocols: []string{"http", "https"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+			},
+		},
+		"cloudhsm": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+			},
+		},
+		"cloudhsmv2": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					CredentialScope: credentialScope{
+						Service: "cloudhsm",
+					},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "cloudsearch": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "cloudtrail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + 
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "cloudtrail-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "cloudtrail-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloudtrail-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloudtrail-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloudtrail-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloudtrail-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"cloudtrail-data": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"codeartifact": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"codebuild": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codebuild-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1-fips",
+				}: endpoint{
+					Hostname: "codebuild-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codebuild-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2-fips",
+				}: endpoint{
+					Hostname: "codebuild-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codebuild-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1-fips",
+				}: endpoint{
+					Hostname: "codebuild-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codebuild-fips.us-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2-fips",
+				}: endpoint{
+					Hostname: "codebuild-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"codecatalyst": service{
+			PartitionEndpoint: "aws-global",
+			IsRegionalized: boxedFalse,
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "aws-global",
+				}: endpoint{
+					Hostname: "codecatalyst.global.api.aws",
+				},
+			},
+		},
+		"codecommit": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codecommit-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-central-1-fips",
+				}: endpoint{
+					Hostname: "codecommit-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips",
+				}: endpoint{
+					Hostname: "codecommit-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codecommit-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1-fips",
+				}: endpoint{
+					Hostname: "codecommit-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codecommit-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2-fips",
+				}: endpoint{
+					Hostname: "codecommit-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codecommit-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1-fips",
+				}: endpoint{
+					Hostname: "codecommit-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codecommit-fips.us-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2-fips",
+				}: endpoint{
+					Hostname: "codecommit-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"codedeploy": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codedeploy-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1-fips",
+				}: endpoint{
+					Hostname: "codedeploy-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codedeploy-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2-fips",
+				}: endpoint{
+					Hostname: "codedeploy-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codedeploy-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1-fips",
+				}: endpoint{
+					Hostname: "codedeploy-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codedeploy-fips.us-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2-fips",
+				}: endpoint{
+					Hostname: "codedeploy-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"codeguru-reviewer": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"codepipeline": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codepipeline-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+					Hostname: "codepipeline-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "codepipeline-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "codepipeline-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "codepipeline-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "codepipeline-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codepipeline-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codepipeline-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codepipeline-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codepipeline-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"codestar": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "codestar-connections": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "codestar-notifications": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "cognito-identity": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + 
Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", + }, + }, + }, + "cognito-idp": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: 
"fips-us-east-1", + }: endpoint{ + Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cognito-idp-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", + }, + }, + }, + "cognito-sync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "comprehend-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "comprehend-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "comprehend-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehend-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehend-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehend-fips.us-west-2.amazonaws.com", + }, + }, + }, + "comprehendmedical": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", + }, + }, + }, + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "compute-optimizer.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "compute-optimizer.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "compute-optimizer.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "compute-optimizer.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "compute-optimizer.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "compute-optimizer.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "compute-optimizer.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "compute-optimizer.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "compute-optimizer.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "compute-optimizer.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "compute-optimizer.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "compute-optimizer.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "compute-optimizer.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "compute-optimizer.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "compute-optimizer.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "compute-optimizer.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "compute-optimizer.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "compute-optimizer.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "compute-optimizer.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "compute-optimizer.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "compute-optimizer.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "compute-optimizer.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "compute-optimizer.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "compute-optimizer.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "config-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "config-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "config-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "config-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + 
Hostname: "config-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config-fips.us-west-2.amazonaws.com", + }, + }, + }, + "connect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "connect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "connect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect-fips.us-west-2.amazonaws.com", + }, + }, + }, + "connect-campaigns": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "connect-campaigns-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "connect-campaigns-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect-campaigns-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect-campaigns-fips.us-west-2.amazonaws.com", + }, + }, + }, + "contact-lens": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "controltower": 
service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "controltower-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "controltower-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "controltower-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"controltower-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "controltower-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "cost-optimization-hub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "cost-optimization-hub.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cur": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "data-ats.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "iotdata", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "data.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "data.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "data.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "data.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "data.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "data.jobs.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "data.mediastore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + 
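The endpointKey/endpoint pairs above are not read directly by callers; they back the resolver in the vendored github.com/aws/aws-sdk-go/aws/endpoints package. A minimal sketch of how a plain key and its fipsVariant counterpart resolve, assuming the vendored SDK is recent enough to export endpoints.UseFIPSEndpointOption (the fipsVariant keys in this table suggest it is):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        r := endpoints.DefaultResolver()

        // Plain key {Region: "us-east-1"} -> default {service}.{region}.{dnsSuffix} pattern.
        ep, err := r.EndpointFor("codedeploy", "us-east-1")
        if err != nil {
            panic(err)
        }
        fmt.Println(ep.URL) // https://codedeploy.us-east-1.amazonaws.com

        // Key with Variant: fipsVariant -> the explicit Hostname override in the table.
        fips, err := r.EndpointFor("codedeploy", "us-east-1",
            endpoints.UseFIPSEndpointOption)
        if err != nil {
            panic(err)
        }
        fmt.Println(fips.URL) // https://codedeploy-fips.us-east-1.amazonaws.com
    }

Deprecated pseudo-regions such as "us-east-1-fips" still resolve to the same FIPS hostname, but their entries carry Deprecated: boxedTrue so the resolver can flag them to callers.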
"databrew": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "databrew-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "databrew-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "databrew-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "databrew-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-west-2.amazonaws.com", + }, + }, + }, + "dataexchange": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "datapipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + 
"datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "datasync-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"datasync-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + }, + }, + }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "datazone.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "datazone.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "datazone.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "datazone.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "datazone.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "datazone.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "datazone.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "datazone.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "datazone.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "datazone.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "datazone.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "datazone.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "datazone.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "datazone.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "datazone.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "datazone.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "datazone.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "datazone.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "datazone.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "datazone.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "datazone.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "datazone.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "datazone.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "datazone.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "datazone.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "datazone.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + 
}: endpoint{ + Hostname: "datazone.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "datazone.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "datazone.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-west-2.amazonaws.com", + }, + }, + }, + "dax": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "devicefarm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "devops-guru": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "devops-guru-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "devops-guru-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "devops-guru-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "devops-guru-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "devops-guru-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + 
endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-west-2.amazonaws.com", + }, + }, + }, + "directconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "directconnect-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "directconnect-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: 
"directconnect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", + }, + }, + }, + "discovery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "dms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + 
endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "dms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "dms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "dms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "docdb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "drs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: 
"drs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "drs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "drs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "drs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ds-fips.ca-west-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-west-2.amazonaws.com", + }, + }, + }, + "dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: 
"eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "local", + }: endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + 
endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ebs-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ebs-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ebs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ebs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ebs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ebs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ec2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ec2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ec2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ec2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ec2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ec2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ec2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ecs": service{ + Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ecs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ecs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ecs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ecs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "edge.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + defaultKey{ + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "fips.eks.{region}.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fips.eks.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fips.eks.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fips.eks.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fips.eks.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.us-west-2.amazonaws.com", + }, + }, + }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "eks-auth.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + 
}: endpoint{ + Hostname: "eks-auth.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "eks-auth.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "eks-auth.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "eks-auth.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "eks-auth.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "eks-auth.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "eks-auth.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "eks-auth.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "eks-auth.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "eks-auth.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "eks-auth.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "eks-auth.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "eks-auth.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "eks-auth.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "eks-auth.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "eks-auth.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "eks-auth.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "eks-auth.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "eks-auth.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "eks-auth.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "eks-auth.us-west-2.api.aws", + }, + }, + }, + "elasticache": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "elasticache-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "elasticache-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "elasticache-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "elasticbeanstalk": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: 
endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", + }, + }, + }, + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"elasticfilesystem-fips.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-4.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ 
+ Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-3", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-north-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + 
Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", + }, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", + }, + }, + }, + "elasticmapreduce": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + }, + Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"elasticmapreduce.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", + }, + }, + }, + "elastictranscoder": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "email": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "email-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "email-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "email-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "email-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "email-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-east-1.amazonaws.com", + }, + 
endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-west-2.amazonaws.com", + }, + }, + }, + "emr-containers": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "emr-containers-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "emr-containers-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "emr-containers-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "emr-containers-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "emr-containers-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + 
}: endpoint{ + Hostname: "emr-containers-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.us-west-2.amazonaws.com", + }, + }, + }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "emr-serverless-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-2.amazonaws.com", + }, + }, + }, + "entitlement.marketplace": service{ + Defaults: 
endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", 
+ Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "es-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "es-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "es-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + 
Deprecated: boxedTrue, + }, + }, + }, + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "events-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", + }, + }, + }, + "evidently": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "evidently.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "evidently.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "evidently.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: 
endpoint{ + Hostname: "evidently.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "evidently.eu-north-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "evidently.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "evidently.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "evidently.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "evidently.us-west-2.amazonaws.com", + }, + }, + }, + "finspace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "finspace-api": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "firehose-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "firehose-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: 
"firehose-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-west-2.amazonaws.com", + }, + }, + }, + "fms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.af-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-east-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: 
endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-south-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "fms-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "fms-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "fms-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: "fms-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "fms-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "fms-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: 
endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ + Hostname: "fms-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.me-south-1.amazonaws.com", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", + }, + }, + }, + "forecast": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "forecast-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "forecast-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: 
"forecast-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecast-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecast-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecast-fips.us-west-2.amazonaws.com", + }, + }, + }, + "forecastquery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "forecastquery-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "forecastquery-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "forecastquery-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecastquery-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecastquery-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecastquery-fips.us-west-2.amazonaws.com", + }, + }, + }, + "frauddetector": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: 
endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-ca-central-1", + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-ca-west-1", + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-east-2", + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-west-2", + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "prod-ca-central-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, 
+ }, + endpointKey{ + Region: "prod-ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-ca-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", + }, + }, + }, + "gamelift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: 
endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "geo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: 
"glacier-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", + }, + }, + }, + "globalaccelerator": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "globalaccelerator-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", + }, + }, + }, + "grafana": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "grafana.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "grafana.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "grafana.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "grafana.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "grafana.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "grafana.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "grafana.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "grafana.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "grafana.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "grafana.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "greengrass-fips.ca-central-1.amazonaws.com", 
+ CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "greengrass-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "greengrass-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "greengrass-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-west-2.amazonaws.com", + }, + }, + }, + "groundstation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "groundstation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "groundstation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "groundstation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "groundstation-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "groundstation-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "groundstation-fips.us-west-2.amazonaws.com", + }, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: 
"ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "health": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "global.health.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "health-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + 
}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "health-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "healthlake": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global-fips", + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam-fips", + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "identity-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "identity-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "identity-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "identitystore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + 
endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "ingest.timestream": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "ingest-fips-us-east-1", + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-fips-us-east-2", + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-fips-us-west-2", + }: endpoint{ + Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "inspector": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + 
endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", + }, + }, + }, + "inspector2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "inspector2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "inspector2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "inspector2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "inspector2-fips.us-west-2.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-west-2.amazonaws.com", + }, + }, + }, + "internetmonitor": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "internetmonitor.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "internetmonitor.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "internetmonitor.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "internetmonitor.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "internetmonitor.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "internetmonitor.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "internetmonitor.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "internetmonitor.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "internetmonitor.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "internetmonitor.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "internetmonitor.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "internetmonitor.eu-west-1.api.aws", + }, + endpointKey{ + Region: 
"eu-west-2", + }: endpoint{ + Hostname: "internetmonitor.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "internetmonitor.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "internetmonitor.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "internetmonitor.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "internetmonitor.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "internetmonitor.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "internetmonitor.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "internetmonitor.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "internetmonitor.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "internetmonitor.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-west-2.amazonaws.com", + }, + }, + }, + "iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "iot-fips.ca-central-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iot-fips.us-east-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "iot-fips.us-east-2.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "iot-fips.us-west-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iot-fips.us-west-2.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-east-1.amazonaws.com", 
+ }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "iotanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "iotevents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "iotevents-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iotevents-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "iotevents-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iotevents-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ioteventsdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: 
"data.iotevents.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "data.iotevents.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "data.iotevents.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "data.iotevents.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "data.iotevents.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "data.iotevents.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "data.iotevents-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "data.iotevents.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "data.iotevents.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "data.iotevents.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-west-2.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "iotfleetwise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "iotsecuredtunneling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "iotsitewise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + 
}: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iotsitewise-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-west-2.amazonaws.com", + }, + }, + }, + "iotthingsgraph": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "iottwinmaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "api-ap-northeast-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "api-ap-northeast-2", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "api-ap-south-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "api-ap-southeast-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + 
}, + }, + endpointKey{ + Region: "api-ap-southeast-2", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "api-eu-central-1", + }: endpoint{ + Hostname: "api.iottwinmaker.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "api-eu-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "api-us-east-1", + }: endpoint{ + Hostname: "api.iottwinmaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "api-us-west-2", + }: endpoint{ + Hostname: "api.iottwinmaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "data-ap-northeast-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "data-ap-northeast-2", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "data-ap-south-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "data-ap-southeast-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "data-ap-southeast-2", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "data-eu-central-1", + }: endpoint{ + Hostname: "data.iottwinmaker.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "data-eu-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "data-us-east-1", + }: endpoint{ + Hostname: "data.iottwinmaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "data-us-west-2", + }: endpoint{ + Hostname: "data.iottwinmaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-api-us-east-1", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-api-us-west-2", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "fips-data-us-east-1", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-data-us-west-2", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, 
+ endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com", + }, + }, + }, + "iotwireless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.iotwireless.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.iotwireless.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.iotwireless.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ivs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "ivschat": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "ivsrealtime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kafka": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: 
"ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "kafka-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "kafka-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "kafka-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "kafka-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "kafka-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "kafka-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-west-2.amazonaws.com", + }, + }, + }, + "kafkaconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + 
endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kendra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "kendra-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "kendra-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "kendra-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "kendra-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-west-2.amazonaws.com", + }, + }, + }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "kendra-ranking.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: 
"ap-northeast-3", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "kendra-ranking.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "kendra-ranking.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "kendra-ranking.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "kendra-ranking.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "kendra-ranking.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "kendra-ranking.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "kendra-ranking.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "kendra-ranking.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "kendra-ranking.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "kendra-ranking.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "kendra-ranking.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "kendra-ranking.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "kendra-ranking.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-west-2.api.aws", + }, + }, + }, + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", 
+ }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", + }, + }, + }, + "kinesisanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: 
endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.af-south-1.amazonaws.com", + }, + endpointKey{ + Region: "af-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-east-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2-fips", + }: endpoint{ + Hostname: "kms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + 
Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-northeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3-fips", + }: endpoint{ + Hostname: "kms-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-south-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-2-fips", + }: endpoint{ + Hostname: "kms-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-3-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-4.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-4-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: 
"kms-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-2-fips", + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-north-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-south-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-south-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-2-fips", + }: endpoint{ + Hostname: "kms-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-2-fips", + }: endpoint{ + Hostname: "kms-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3-fips", + }: endpoint{ + Hostname: "kms-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.il-central-1.amazonaws.com", + }, + endpointKey{ + Region: "il-central-1-fips", + }: endpoint{ + Hostname: 
"kms-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.me-central-1.amazonaws.com", + }, + endpointKey{ + Region: "me-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.me-south-1.amazonaws.com", + }, + endpointKey{ + Region: "me-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.sa-east-1.amazonaws.com", + }, + endpointKey{ + Region: "sa-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "kms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "kms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "lakeformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: 
endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "lakeformation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "lakeformation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-west-2.amazonaws.com", + }, + }, + }, + "lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: 
dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"fips-us-west-2", + }: endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", + }, + }, + }, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: 
"license-manager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + }, + }, + }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + 
endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com", + }, + }, + }, + "license-manager-user-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + 
endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com", + }, + }, + }, + "lightsail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-south-2.api.aws", + 
}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "logs-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "logs-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: 
endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "lookoutequipment": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "lookoutmetrics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "lookoutvision": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, 
+ endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "m2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "machinelearning": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "macie2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: 
"macie2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "macie2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "macie2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-west-2.amazonaws.com", + }, + }, + }, + "managedblockchain": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "managedblockchain-query": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "media-pipelines-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: 
"us-west-2-fips", + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "mediaconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediaconvert": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", 
+ }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", + }, + }, + }, + "medialive": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "medialive-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "medialive-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "medialive-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "medialive-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "medialive-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "medialive-fips.us-west-2.amazonaws.com", + }, + }, + }, + "mediapackage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediapackage-vod": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediapackagev2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediastore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "meetings-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + 
endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "memory-db": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "memory-db-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "messaging-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "metering.marketplace": service{ + Defaults: 
endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mgh": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + 
Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mgn": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "mgn-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "mgn-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "mgn-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "mgn-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-west-2.amazonaws.com", + }, + }, + }, + "migrationhub-orchestrator": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: 
endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "migrationhub-strategy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mobileanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "models-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "models-fips.lex.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "models-fips.lex.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "monitoring": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: 
endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + }, + }, + }, + "mq": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: 
endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + }, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "sandbox", + }: endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "neptune": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "rds.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "rds.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "rds.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "network-firewall": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: 
endpoint{ + Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "network-firewall-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "network-firewall-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-west-2.amazonaws.com", + }, + }, + }, + "networkmanager": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "networkmanager.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "networkmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "networkmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "nimble": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "oam": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "oidc": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "oidc.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "oidc.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "oidc.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "oidc.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "oidc.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "oidc.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "oidc.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "oidc.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "oidc.ap-southeast-4.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "oidc.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "oidc.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "oidc.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "oidc.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "oidc.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "oidc.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "oidc.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "oidc.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "oidc.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "oidc.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "oidc.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "omics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "omics.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "omics.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + 
Region: "eu-west-1", + }: endpoint{ + Hostname: "omics.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "omics.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "omics-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "omics-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "omics.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "omics.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "omics-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "omics.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "omics-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "opsworks-cm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "osis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + 
}: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + }, + }, + }, + "participant.connect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "participant.connect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "participant.connect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "participant.connect-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "participant.connect-fips.us-west-2.amazonaws.com", + }, + }, + }, + "personalize": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.af-south-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + 
endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-northeast-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-northeast-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-northeast-3.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-south-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-south-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-3.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-4.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ca-central-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.ca-central-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.ca-central-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ca-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.ca-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.ca-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-central-1", + Variant: 
dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-central-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-central-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-north-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-south-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-south-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-3.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "pi-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "pi-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "pi-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "pi-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "pi-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "pi-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.il-central-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.me-central-1.api.aws", + Protocols: []string{"https"}, + }, + 
endpointKey{ + Region: "me-south-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.me-south-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.sa-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-east-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-west-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-2.api.aws", + Protocols: []string{"https"}, + }, + }, + }, + "pinpoint": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "pinpoint.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + 
}: endpoint{ + Hostname: "pinpoint-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "pinpoint-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "pinpoint-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "pinpoint.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "pinpoint.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "pinpoint.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "pipes": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "polly": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "polly-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "polly-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "polly-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "polly-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-west-2.amazonaws.com", + }, + }, + }, + "portal.sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "portal.sso.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "portal.sso.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "portal.sso.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "portal.sso.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + 
endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "portal.sso.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "portal.sso.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "portal.sso.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "portal.sso.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "portal.sso.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "portal.sso.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "portal.sso.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "portal.sso.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "portal.sso.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "portal.sso.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "portal.sso.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "portal.sso.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "portal.sso.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "portal.sso.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "private-networks": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "profile": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "profile-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "profile-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "profile-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.us-west-2.amazonaws.com", + }, + }, + }, + "projects.iot1click": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "proton": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: 
endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "qbusiness": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "qbusiness.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "qbusiness.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "qbusiness.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "qbusiness.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "qbusiness.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "qbusiness.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "qbusiness.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "qbusiness.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "qbusiness.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "qbusiness.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "qbusiness.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "qbusiness.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "qbusiness.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "qbusiness.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "qbusiness.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "qbusiness.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "qbusiness.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "qbusiness.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "qbusiness.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "qbusiness.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "qbusiness.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "qbusiness.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "qbusiness.us-east-2.api.aws", + }, + endpointKey{ + 
Region: "us-west-1", + }: endpoint{ + Hostname: "qbusiness.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "qbusiness.us-west-2.api.aws", + }, + }, + }, + "qldb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "qldb-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "qldb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "qldb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "qldb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "qldb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "qldb-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "qldb-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "qldb-fips.us-west-2.amazonaws.com", + }, + }, + }, + "quicksight": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "api", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + 
Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ram-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ram-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ram-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ram-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ram-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ram-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"ram-fips.us-west-2.amazonaws.com", + }, + }, + }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "rbin-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "rbin-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "rbin-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "rbin-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "rbin-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"rbin-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-west-2.amazonaws.com", + }, + }, + }, + "rds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "rds-fips.ca-central-1", + }: endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.ca-west-1", + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-east-1", + }: endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-east-2", + }: endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-west-1", + }: endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-west-2", + }: endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + 
Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.ca-central-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.ca-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + SSLCommonName: "{service}.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, 
+ Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "rds-data": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "rds-data-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "rds-data-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "rds-data-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "rds-data-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-west-2.amazonaws.com", + }, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + }, + 
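The "fips-*", "*-fips", and "rds.*" region keys above (all marked Deprecated: boxedTrue) are legacy pseudo-regions kept for callers that still pass them as a region name; each entry's CredentialScope pins SigV4 signing back to the real region. A sketch of what resolving one of the deprecated rds keys should yield, under the same assumptions as the sketch above:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// "us-east-1-fips" is one of the deprecated pseudo-region keys in
	// the rds table above; it still resolves, but requests are signed
	// for us-east-1 because of the entry's CredentialScope. New code
	// should use the plain region plus the FIPS variant instead.
	ep, err := endpoints.DefaultResolver().EndpointFor("rds", "us-east-1-fips")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ep.URL)           // https://rds-fips.us-east-1.amazonaws.com
	fmt.Println(ep.SigningRegion) // us-east-1, not "us-east-1-fips"
}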
endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "redshift-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "redshift-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "redshift-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "redshift-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "redshift-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-west-2.amazonaws.com", + }, + }, + }, + "redshift-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: 
endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com", + }, + }, + }, + "rekognition": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "rekognition-fips.ca-central-1", + }: endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + 
Region: "rekognition-fips.us-east-1", + }: endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition-fips.us-east-2", + }: endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition-fips.us-west-1", + }: endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition-fips.us-west-2", + }: endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.ca-central-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: 
"rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "resiliencehub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "resource-explorer-2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + 
Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "resource-groups": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "resource-groups-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "resource-groups-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups-fips.us-west-2.amazonaws.com", + }, + }, + }, + "robomaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "rolesanywhere": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com", + }, + }, + }, + "route53": service{ + PartitionEndpoint: 
"aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "route53-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "route53-recovery-control-config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "route53-recovery-control-config.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "route53domains": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "route53resolver": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "rum": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + 
endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "runtime-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.lex.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.lex.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "runtime-fips.lex.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.lex.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "runtime-fips.lex.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "runtime.sagemaker": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.sagemaker.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + 
Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + 
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "af-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.af-south-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{
+					Hostname:          "s3.ap-northeast-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region:  "ap-northeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3.dualstack.ap-northeast-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-northeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-northeast-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-northeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-northeast-3.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-south-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-south-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-south-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{
+					Hostname:          "s3.ap-southeast-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region:  "ap-southeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3.dualstack.ap-southeast-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{
+					Hostname:          "s3.ap-southeast-2.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region:  "ap-southeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3.dualstack.ap-southeast-2.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-southeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-southeast-4",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-southeast-4.amazonaws.com",
+				},
+				endpointKey{
+					Region: "aws-global",
+				}: endpoint{
+					Hostname:          "s3.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "s3-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ca-west-1.amazonaws.com",
"s3.dualstack.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-central-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-north-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-south-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-south-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "s3-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "s3-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "s3-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "s3-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "s3-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "s3-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.il-central-1.amazonaws.com", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + 
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.me-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "me-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.me-south-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "s3-external-1",
+				}: endpoint{
+					Hostname:          "s3-external-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{
+					Hostname:          "s3.sa-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region:  "sa-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3.dualstack.sa-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{
+					Hostname:          "s3.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3.dualstack.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname:          "s3-fips.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-fips.dualstack.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "s3-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-fips.dualstack.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{
+					Hostname:          "s3.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3.dualstack.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname:          "s3-fips.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-fips.dualstack.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{
+					Hostname:          "s3.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3.dualstack.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname:          "s3-fips.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-fips.dualstack.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+			},
+		},
+		"s3-control": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols:         []string{"https"},
[]string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "s3-control.af-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.af-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "s3-control.ap-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "s3-control.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "s3-control.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "s3-control.ap-northeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-northeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "s3-control.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "s3-control.ap-south-2.amazonaws.com", + SignatureVersions: 
[]string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-south-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "s3-control.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "s3-control.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "s3-control.ap-southeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-southeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "s3-control.ap-southeast-4.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-southeast-4.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "s3-control.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: 
+				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{
+					Hostname:          "s3-control.ca-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+				},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control.dualstack.ca-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+				},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname:          "s3-control-fips.ca-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+				},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control-fips.dualstack.ca-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+				},
+				endpointKey{
+					Region: "ca-west-1-fips",
+				}: endpoint{
+					Hostname:          "s3-control-fips.ca-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{
+					Hostname:          "s3-control.eu-central-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-central-1",
+					},
+				},
+				endpointKey{
+					Region:  "eu-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control.dualstack.eu-central-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-central-1",
+					},
+				},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{
+					Hostname:          "s3-control.eu-central-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-central-2",
+					},
+				},
+				endpointKey{
+					Region:  "eu-central-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control.dualstack.eu-central-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-central-2",
+					},
+				},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{
+					Hostname:          "s3-control.eu-north-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-north-1",
+					},
+				},
+				endpointKey{
+					Region:  "eu-north-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control.dualstack.eu-north-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-north-1",
+					},
+				},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{
+					Hostname:          "s3-control.eu-south-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-south-1",
+					},
+				},
+				endpointKey{
+					Region:  "eu-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control.dualstack.eu-south-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-south-1",
+					},
+				},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{
+					Hostname:          "s3-control.eu-south-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-south-2",
+					},
+				},
+				endpointKey{
+					Region:  "eu-south-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control.dualstack.eu-south-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-south-2",
+					},
+				},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{
+					Hostname:          "s3-control.eu-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-west-1",
+					},
+				},
+				endpointKey{
+					Region:  "eu-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control.dualstack.eu-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-west-1",
+					},
+				},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{
+					Hostname:          "s3-control.eu-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-west-2",
+					},
+				},
+				endpointKey{
+					Region:  "eu-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control.dualstack.eu-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-west-2",
+					},
+				},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{
+					Hostname:          "s3-control.eu-west-3.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-west-3",
+					},
+				},
+				endpointKey{
+					Region:  "eu-west-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control.dualstack.eu-west-3.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-west-3",
+					},
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{
+					Hostname:          "s3-control.il-central-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "il-central-1",
+					},
+				},
+				endpointKey{
+					Region:  "il-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control.dualstack.il-central-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "il-central-1",
+					},
+				},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{
+					Hostname:          "s3-control.me-central-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "me-central-1",
+					},
+				},
+				endpointKey{
+					Region:  "me-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control.dualstack.me-central-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "me-central-1",
+					},
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{
+					Hostname:          "s3-control.me-south-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "me-south-1",
+					},
+				},
+				endpointKey{
+					Region:  "me-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control.dualstack.me-south-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "me-south-1",
+					},
+				},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{
+					Hostname:          "s3-control.sa-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "sa-east-1",
+					},
+				},
+				endpointKey{
+					Region:  "sa-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-control.dualstack.sa-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "sa-east-1",
+					},
+				},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{
+					Hostname:          "s3-control.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "s3-control.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "s3-control.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "s3-control.us-west-2.amazonaws.com", + SignatureVersions: 
[]string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: 
+				endpointKey{
+					Region:  "eu-west-3",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "il-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "me-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "sa-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+			},
+		},
+		"sagemaker-geospatial": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"savingsplans": service{
+			PartitionEndpoint: "aws-global",
+			IsRegionalized:    boxedFalse,
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "aws-global",
+				}: endpoint{
+					Hostname: "savingsplans.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+			},
+		},
+		"scheduler": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"schemas": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"sdb": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols:         []string{"http", "https"},
+					SignatureVersions: []string{"v2"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{
+					Hostname: "sdb.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"secretsmanager": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "af-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-northeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-northeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-northeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-south-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-southeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-southeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-southeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ap-southeast-4",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-central-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1-fips",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1-fips",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-central-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-north-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-south-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region:  "eu-west-3",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "il-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "me-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "me-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "sa-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1-fips",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2-fips",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1-fips",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2-fips",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"securityhub": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "securityhub-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "securityhub-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "securityhub-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
"fips-us-west-2", + }: endpoint{ + Hostname: "securityhub-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securityhub-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securityhub-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securityhub-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securityhub-fips.us-west-2.amazonaws.com", + }, + }, + }, + "securitylake": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "securitylake-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "securitylake-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "securitylake-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "securitylake-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + 
Hostname: "securitylake-fips.us-west-2.amazonaws.com", + }, + }, + }, + "serverlessrepo": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "servicecatalog": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ 
+ Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "servicecatalog-appregistry": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ 
+ Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com", + }, + }, + }, + "servicediscovery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"servicediscovery.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"servicediscovery.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "servicequotas": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "session.qldb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: 
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "session.qldb-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "session.qldb-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "session.qldb-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "session.qldb-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "session.qldb-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "session.qldb-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"shield": service{
+			PartitionEndpoint: "aws-global",
+			IsRegionalized:    boxedFalse,
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					SSLCommonName: "shield.us-east-1.amazonaws.com",
+					Protocols:     []string{"https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "aws-global",
+				}: endpoint{
+					Hostname: "shield.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region: "aws-global",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "shield-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region: "fips-aws-global",
+				}: endpoint{
+					Hostname: "shield-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"signer": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "signer-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "signer-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "signer-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "signer-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-verification-us-east-1",
+				}: endpoint{
+					Hostname: "verification.signer-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region: "fips-verification-us-east-2",
+				}: endpoint{
+					Hostname: "verification.signer-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				endpointKey{
+					Region: "fips-verification-us-west-1",
+				}: endpoint{
+					Hostname: "verification.signer-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				endpointKey{
+					Region: "fips-verification-us-west-2",
+				}: endpoint{
+					Hostname: "verification.signer-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "signer-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "signer-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "signer-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "signer-fips.us-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "verification-af-south-1",
+				}: endpoint{
+					Hostname: "verification.signer.af-south-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "af-south-1",
+					},
+				},
+				endpointKey{
+					Region: "verification-ap-east-1",
+				}: endpoint{
+					Hostname: "verification.signer.ap-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-east-1",
+					},
+				},
+				endpointKey{
+					Region: "verification-ap-northeast-1",
+				}: endpoint{
+					Hostname: "verification.signer.ap-northeast-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-1",
+					},
+				},
+				endpointKey{
+					Region: "verification-ap-northeast-2",
+				}: endpoint{
+					Hostname: "verification.signer.ap-northeast-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-2",
+					},
+				},
+				endpointKey{
+					Region: "verification-ap-south-1",
+				}: endpoint{
+					Hostname: "verification.signer.ap-south-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-south-1",
+					},
+				},
+				endpointKey{
+					Region: "verification-ap-southeast-1",
+				}: endpoint{
+					Hostname: "verification.signer.ap-southeast-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-1",
+					},
+				},
+				endpointKey{
+					Region: "verification-ap-southeast-2",
+				}: endpoint{
+					Hostname: "verification.signer.ap-southeast-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-2",
+					},
+				},
+				endpointKey{
+					Region: "verification-ca-central-1",
+				}: endpoint{
+					Hostname: "verification.signer.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+				},
+				endpointKey{
+					Region: "verification-eu-central-1",
+				}: endpoint{
+					Hostname: "verification.signer.eu-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-central-1",
+					},
+				},
+				endpointKey{
+					Region: "verification-eu-north-1",
+				}: endpoint{
+					Hostname: "verification.signer.eu-north-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-north-1",
+					},
+				},
+				endpointKey{
+					Region: "verification-eu-south-1",
+				}: endpoint{
+					Hostname: "verification.signer.eu-south-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-south-1",
+					},
+				},
+				endpointKey{
+					Region: "verification-eu-west-1",
+				}: endpoint{
+					Hostname: "verification.signer.eu-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-1",
+					},
+				},
+				endpointKey{
+					Region: "verification-eu-west-2",
+				}: endpoint{
+					Hostname: "verification.signer.eu-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-2",
+					},
+				},
+				endpointKey{
+					Region: "verification-eu-west-3",
+				}: endpoint{
+					Hostname: "verification.signer.eu-west-3.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-3",
+					},
+				},
+				endpointKey{
+					Region: "verification-me-south-1",
+				}: endpoint{
+					Hostname: "verification.signer.me-south-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "me-south-1",
+					},
+				},
+				endpointKey{
+					Region: "verification-sa-east-1",
+				}: endpoint{
+					Hostname: "verification.signer.sa-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "sa-east-1",
+					},
+				},
+				endpointKey{
+					Region: "verification-us-east-1",
+				}: endpoint{
+					Hostname: "verification.signer.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region: "verification-us-east-2",
+				}: endpoint{
+					Hostname: "verification.signer.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				endpointKey{
+					Region: "verification-us-west-1",
+				}: endpoint{
+					Hostname: "verification.signer.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				endpointKey{
+					Region: "verification-us-west-2",
+				}: endpoint{
+					Hostname: "verification.signer.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+			},
+		},
+		"simspaceweaver": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"sms": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "sms-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "sms-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"sms-voice": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "sms-voice-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+					Hostname: "sms-voice-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "sms-voice-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "sms-voice-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "sms-voice-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "sms-voice-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "sms-voice-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "sms-voice-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "sms-voice-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "sms-voice-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"snowball": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.ap-northeast-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.ap-northeast-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.ap-northeast-3.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.ap-south-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.ap-southeast-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.ap-southeast-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.eu-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.eu-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.eu-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.eu-west-3.amazonaws.com",
+				},
+				endpointKey{
+					Region: "fips-ap-northeast-1",
+				}: endpoint{
+					Hostname: "snowball-fips.ap-northeast-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-ap-northeast-2",
+				}: endpoint{
+					Hostname: "snowball-fips.ap-northeast-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-ap-northeast-3",
+				}: endpoint{
+					Hostname: "snowball-fips.ap-northeast-3.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-3",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-ap-south-1",
+				}: endpoint{
+					Hostname: "snowball-fips.ap-south-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-south-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-ap-southeast-1",
+				}: endpoint{
+					Hostname: "snowball-fips.ap-southeast-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-ap-southeast-2",
+				}: endpoint{
+					Hostname: "snowball-fips.ap-southeast-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+					Hostname: "snowball-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-eu-central-1",
+				}: endpoint{
+					Hostname: "snowball-fips.eu-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-eu-west-1",
+				}: endpoint{
+					Hostname: "snowball-fips.eu-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-eu-west-2",
+				}: endpoint{
+					Hostname: "snowball-fips.eu-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-eu-west-3",
+				}: endpoint{
+					Hostname: "snowball-fips.eu-west-3.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "eu-west-3",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-sa-east-1",
+				}: endpoint{
+					Hostname: "snowball-fips.sa-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "sa-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "snowball-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "snowball-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "snowball-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "snowball-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.sa-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"sns": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"http", "https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
"ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "sns-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + }, + }, + }, + "sqs": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + 
endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + SSLCommonName: "queue.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ssm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"ssm-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ssm-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ssm-contacts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, 
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"ssm-incidents": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+					Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"ssm-sap": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+					Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "ssm-sap-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "ssm-sap-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "ssm-sap-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "ssm-sap-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ssm-sap-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ssm-sap-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ssm-sap-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ssm-sap-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"sso": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"states": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "states-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
"fips-us-east-2", + }: endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + }, + }, + }, + "storagegateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: 
endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "streams.dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "local", + }: endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + 
endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "support.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "supportapp": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "swf": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"swf-fips.us-west-2.amazonaws.com", + }, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "synthetics-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "synthetics-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "synthetics-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "synthetics-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-west-2.amazonaws.com", + }, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + 
endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "tax": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "tax.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "textract": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.ca-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + 
endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "textract-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "textract-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "textract-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "textract-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "textract-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-2.api.aws", + }, + }, + }, + "thinclient": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "tnb": service{ + Endpoints: serviceEndpoints{ + 
endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "fips.transcribe.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + 
endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + }, + }, + }, + "transcribestreaming": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "transcribestreaming-ca-central-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-fips-ca-central-1", + }: endpoint{ + Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-fips-us-east-1", + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-fips-us-east-2", + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-fips-us-west-2", + }: endpoint{ + Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, 
+ endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "transfer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "transfer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "transfer-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "transfer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "transfer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "transfer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "transfer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-west-2.amazonaws.com", + }, + }, + }, + "translate": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "translate-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "verifiedpermissions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + 
endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com", + }, + }, + }, + "voice-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "voice-chime-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "voice-chime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voice-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "voice-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voice-chime-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "voice-chime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "voiceid": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voiceid-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "voiceid-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "voiceid-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "voiceid-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voiceid-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voiceid-fips.us-west-2.amazonaws.com", + }, + }, + }, + "vpc-lattice": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + 
Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "aws", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "aws-fips", + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global-fips", + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "waf-regional": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "waf-regional.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "waf-regional.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + 
Hostname: "waf-regional.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "waf-regional.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "waf-regional.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "waf-regional.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "waf-regional.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: 
"eu-north-1", + }: endpoint{ + Hostname: "waf-regional.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "waf-regional.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "waf-regional.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "waf-regional.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "waf-regional.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "waf-regional.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, 
+ }, + endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-3", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-north-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ + Hostname: 
"waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "waf-regional.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "waf-regional.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "waf-regional.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "waf-regional.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "waf-regional.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "waf-regional.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: 
"waf-regional.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "waf-regional.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "wafv2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "wafv2.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "wafv2.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "wafv2.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "wafv2.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "wafv2.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "wafv2.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "wafv2.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "wafv2.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "wafv2.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "wafv2.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "wafv2.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "wafv2.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "wafv2.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "wafv2.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "wafv2.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "wafv2.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "wafv2.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"wafv2-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "wafv2.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "wafv2.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "wafv2.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "wafv2.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "wafv2-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "wafv2-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-3", + }: endpoint{ + Hostname: 
"wafv2-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "wafv2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "wafv2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "wafv2-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-north-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "wafv2-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "wafv2-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "wafv2-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "wafv2-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "wafv2-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ + Hostname: "wafv2-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "wafv2-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "wafv2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "wafv2-fips.us-east-2.amazonaws.com", 
+ CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "wafv2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "wafv2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "wafv2.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "wafv2.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "wafv2.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "wafv2.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "wafv2.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "wafv2.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "wafv2.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "wafv2.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "wellarchitected": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + 
}: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "wisdom": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ui-ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ui-ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ui-ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ui-ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ui-ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ui-eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "ui-eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "ui-us-east-1", + }: endpoint{}, + endpointKey{ + Region: "ui-us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "workdocs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "workdocs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "workdocs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workdocs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workdocs-fips.us-west-2.amazonaws.com", + }, + }, + }, + 
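[Editor's note, a minimal sketch that is not part of the vendored file: each service above maps an endpointKey{Region, Variant} to an endpoint. The bare "fips-<region>" keys are deprecated aliases (Deprecated: boxedTrue); the supported route to a FIPS hostname such as workdocs-fips.us-west-2.amazonaws.com is to resolve the plain region with the FIPS variant enabled, roughly:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/endpoints"
	)

	func main() {
		// FIPSEndpointStateEnabled selects the fipsVariant entries
		// defined in the vendored defaults above.
		ep, err := endpoints.DefaultResolver().EndpointFor(
			"workdocs", "us-west-2",
			func(o *endpoints.Options) {
				o.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
			},
		)
		if err != nil {
			panic(err)
		}
		// Expect https://workdocs-fips.us-west-2.amazonaws.com, signing
		// for us-west-2 per the endpoint's CredentialScope.
		fmt.Println(ep.URL, ep.SigningRegion)
	}
]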
"workmail": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "workspaces-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "workspaces-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-west-2.amazonaws.com", + }, + }, + }, + "workspaces-web": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "xray": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: 
endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "xray-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "xray-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "xray-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "xray-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-west-2.amazonaws.com", + }, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. 
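[Editor's sketch for the partition accessor below, not taken from the source: a Partition resolves endpoints only within its own region set, so AWS China lookups can go through it directly. Under the partition defaults that follow ({service}.{region}.{dnsSuffix} with the amazonaws.com.cn suffix), an s3 lookup should resolve like this:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/endpoints"
	)

	func main() {
		p := endpoints.AwsCnPartition()
		ep, err := p.EndpointFor("s3", "cn-north-1")
		if err != nil {
			panic(err)
		}
		// Expect https://s3.cn-north-1.amazonaws.com.cn from the
		// default hostname template.
		fmt.Println(ep.URL)
	}
]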
+func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, + }, + Services: services{ + "access-analyzer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "account": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "account.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "acm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "acm-pca": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "airflow": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "api.ecr": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "api.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "api.tunneling.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "apigateway": 
service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "applicationinsights": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "appmesh": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "appsync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "backup": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "batch": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "budgets.amazonaws.com.cn", + CredentialScope: 
credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cassandra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "ce.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "cloudformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudtrail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "codebuild": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "codecommit": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "codedeploy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "codepipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "cognito-identity": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "compute-optimizer.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "compute-optimizer.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "cur": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "data-ats.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: 
credentialScope{ + Service: "iotdata", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "data.ats.iot.cn-north-1.amazonaws.com.cn", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "data.jobs.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "databrew": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "datazone.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "datazone.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "dax": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "directconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "dms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "docdb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "ds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ecs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + 
Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "eks-auth.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "eks-auth.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "elasticache": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "elasticbeanstalk": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "emr-containers": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "entitlement.marketplace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: 
"entitlement-marketplace.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "firehose.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "firehose.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "fms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "gamelift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "health": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "global.health.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + 
endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "identitystore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "inspector2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "internetmonitor": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "internetmonitor.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "internetmonitor.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "iotanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "iotevents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "ioteventsdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "data.iotevents.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "iotsitewise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "iottwinmaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "api-cn-north-1", + }: endpoint{ + Hostname: "api.iottwinmaker.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "data-cn-north-1", + }: endpoint{ + Hostname: "data.iottwinmaker.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "kafka": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "kendra-ranking.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "kendra-ranking.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: 
"cn-northwest-1", + }: endpoint{}, + }, + }, + "kinesisanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "lakeformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "mediaconvert": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "mediaconvert.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "memory-db": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "mq": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "neptune": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "rds.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "network-firewall": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "oam": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "oidc": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "oidc.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "oidc.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "personalize": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.cn-north-1.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.cn-northwest-1.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + }, + }, + }, + "pipes": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "polly": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "portal.sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "portal.sso.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "qbusiness": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "qbusiness.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "qbusiness.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "quicksight": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "rds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + 
"redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "redshift-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "resource-groups": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "rolesanywhere": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "route53.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "route53resolver": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "runtime.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "s3": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.cn-north-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.cn-northwest-1.amazonaws.com.cn", + }, + }, + }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "s3-control.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + 
+				}: endpoint{
+					Hostname: "s3-control.dualstack.cn-northwest-1.amazonaws.com.cn",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "cn-northwest-1",
+					},
+				},
+			},
+		},
+		"savingsplans": service{
+			IsRegionalized: boxedTrue,
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{
+					Hostname: "savingsplans.cn-north-1.amazonaws.com.cn",
+					CredentialScope: credentialScope{
+						Region: "cn-north-1",
+					},
+				},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{
+					Hostname: "savingsplans.cn-northwest-1.amazonaws.com.cn",
+					CredentialScope: credentialScope{
+						Region: "cn-northwest-1",
+					},
+				},
+			},
+		},
+		"schemas": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"secretsmanager": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-north-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+			},
+		},
+		"securityhub": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"serverlessrepo": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+			},
+		},
+		"servicecatalog": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"servicediscovery": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-north-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "servicediscovery.cn-north-1.api.amazonwebservices.com.cn",
+				},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "servicediscovery.cn-northwest-1.api.amazonwebservices.com.cn",
+				},
+			},
+		},
+		"servicequotas": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"signer": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "verification-cn-north-1",
+				}: endpoint{
+					Hostname: "verification.signer.cn-north-1.amazonaws.com.cn",
+					CredentialScope: credentialScope{
+						Region: "cn-north-1",
+					},
+				},
+				endpointKey{
+					Region: "verification-cn-northwest-1",
+				}: endpoint{
+					Hostname: "verification.signer.cn-northwest-1.amazonaws.com.cn",
+					CredentialScope: credentialScope{
+						Region: "cn-northwest-1",
+					},
+				},
+			},
+		},
+		"sms": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+			},
+		},
+		"snowball": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-north-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn",
+				},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn",
+				},
+				endpointKey{
+					Region: "fips-cn-north-1",
+				}: endpoint{
+					Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn",
+					CredentialScope: credentialScope{
+						Region: "cn-north-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-cn-northwest-1",
+				}: endpoint{
+					Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn",
+					CredentialScope: credentialScope{
+						Region: "cn-northwest-1",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"sns": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"http", "https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"sqs": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					SSLCommonName: "{region}.queue.{dnsSuffix}",
+					Protocols: []string{"http", "https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"ssm": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"sso": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"states": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-north-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "states.cn-north-1.api.amazonwebservices.com.cn",
+				},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "states.cn-northwest-1.api.amazonwebservices.com.cn",
+				},
+			},
+		},
+		"storagegateway": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"streams.dynamodb": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"http", "https"},
+					CredentialScope: credentialScope{
+						Service: "dynamodb",
+					},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"sts": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"support": service{
+			PartitionEndpoint: "aws-cn-global",
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "aws-cn-global",
+				}: endpoint{
+					Hostname: "support.cn-north-1.amazonaws.com.cn",
+					CredentialScope: credentialScope{
+						Region: "cn-north-1",
+					},
+				},
+			},
+		},
+		"swf": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"synthetics": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
"cn-northwest-1", + }: endpoint{}, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "transcribestreaming": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "transfer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "waf-regional": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "waf-regional.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "waf-regional.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ + Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ + Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "wafv2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "wafv2.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "wafv2.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ + Hostname: "wafv2-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ + Hostname: 
"wafv2-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "xray": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). +func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{ + "us-gov-east-1": region{ + Description: "AWS GovCloud (US-East)", + }, + "us-gov-west-1": region{ + Description: "AWS GovCloud (US-West)", + }, + }, + Services: services{ + "access-analyzer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "acm": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "acm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "acm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "acm-pca": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "acm-pca.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca.us-gov-west-1.amazonaws.com", + }, + }, + }, + "api.detective": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "api.ecr": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dkr-us-gov-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-dkr-us-gov-east-1", + }: endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: 
+				},
+				endpointKey{
+					Region: "fips-dkr-us-gov-west-1",
+				}: endpoint{
+					Hostname: "ecr-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-gov-east-1",
+				}: endpoint{
+					Hostname: "ecr-fips.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "ecr-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{
+					Hostname: "api.ecr.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ecr-fips.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{
+					Hostname: "api.ecr.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ecr-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+				},
+			},
+		},
+		"api.sagemaker": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{},
+				defaultKey{
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "api-fips.sagemaker.{region}.{dnsSuffix}",
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1-fips",
+				}: endpoint{
+					Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-west-1-fips-secondary",
+				}: endpoint{
+					Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-west-1-secondary",
+				}: endpoint{
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-west-1-secondary",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"api.tunneling.iot": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{},
+				defaultKey{
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}",
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-gov-east-1",
+				}: endpoint{
+					Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com",
+				},
+			},
+		},
+		"apigateway": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+			},
+		},
+		"appconfig": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-gov-east-1",
+				}: endpoint{
+					Hostname: "appconfig.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "appconfig.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "appconfig.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "appconfig.us-gov-west-1.amazonaws.com",
+				},
+			},
+		},
+		"appconfigdata": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-gov-east-1",
+				}: endpoint{
+					Hostname: "appconfigdata.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "appconfigdata.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "appconfigdata.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "appconfigdata.us-gov-west-1.amazonaws.com",
+				},
+			},
+		},
+		"application-autoscaling": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Hostname: "autoscaling.{region}.amazonaws.com",
+					Protocols: []string{"http", "https"},
+					CredentialScope: credentialScope{
+						Service: "application-autoscaling",
+					},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{
+					Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com",
+					Protocols: []string{"http", "https"},
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com",
+					Protocols: []string{"http", "https"},
+				},
+				endpointKey{
+					Region: "us-gov-east-1-fips",
+				}: endpoint{
+					Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com",
+					Protocols: []string{"http", "https"},
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{
+					Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com",
+					Protocols: []string{"http", "https"},
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com",
+					Protocols: []string{"http", "https"},
+				},
+				endpointKey{
+					Region: "us-gov-west-1-fips",
+				}: endpoint{
+					Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com",
+					Protocols: []string{"http", "https"},
+
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"applicationinsights": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{
+					Hostname: "applicationinsights.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{
+					Hostname: "applicationinsights.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+				},
+			},
+		},
+		"appstream2": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"https"},
+					CredentialScope: credentialScope{
+						Service: "appstream",
+					},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips",
+				}: endpoint{
+					Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-east-1-fips",
+				}: endpoint{
+					Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1-fips",
+				}: endpoint{
+					Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"arc-zonal-shift": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+			},
+		},
+		"athena": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-gov-east-1",
+				}: endpoint{
+					Hostname: "athena-fips.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "athena-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.us-gov-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "athena-fips.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "athena-fips.us-gov-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.us-gov-west-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "athena-fips.us-gov-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "athena-fips.us-gov-west-1.api.aws",
+				},
+			},
+		},
+		"autoscaling": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{},
+				defaultKey{
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "autoscaling.{region}.{dnsSuffix}",
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{
+					Protocols: []string{"http", "https"},
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{
+					Protocols: []string{"http", "https"},
+				},
+			},
+		},
+		"autoscaling-plans": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"http", "https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-east-1-fips",
+				}: endpoint{
+					Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com",
+					Protocols: []string{"http", "https"},
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1-fips",
+				}: endpoint{
+					Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com",
+					Protocols: []string{"http", "https"},
+
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"backup": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+			},
+		},
+		"backup-gateway": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+			},
+		},
+		"batch": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{},
+				defaultKey{
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "batch.{region}.{dnsSuffix}",
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-gov-east-1",
+				}: endpoint{
+					Hostname: "batch.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "batch.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "batch.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "batch.us-gov-west-1.amazonaws.com",
+				},
+			},
+		},
+		"bedrock": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "bedrock-fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "bedrock-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+				},
+				endpointKey{
+					Region: "bedrock-runtime-fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "bedrock-runtime-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+				},
+				endpointKey{
+					Region: "bedrock-runtime-us-gov-west-1",
+				}: endpoint{
+					Hostname: "bedrock-runtime.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "bedrock-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "cassandra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "cassandra.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "cassandra.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "cassandra.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "cassandra.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-west-1.api.aws", + }, + }, + }, + "clouddirectory": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "clouddirectory.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "clouddirectory.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "cloudformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "cloudhsm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "cloudtrail": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + }, + }, + }, + "codebuild": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + }, + 
+				endpointKey{
+					Region: "us-gov-west-1-fips",
+				}: endpoint{
+					Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"codecommit": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips",
+				}: endpoint{
+					Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codecommit-fips.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-east-1-fips",
+				}: endpoint{
+					Hostname: "codecommit-fips.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1-fips",
+				}: endpoint{
+					Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"codedeploy": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-east-1-fips",
+				}: endpoint{
+					Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1-fips",
+				}: endpoint{
+					Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"codepipeline": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-gov-east-1",
+				}: endpoint{
+					Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com",
+				},
+			},
+		},
+		"codestar-connections": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+			},
+		},
+		"cognito-identity": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
"us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "cognito-idp": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "comprehend": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "comprehendmedical": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "compute-optimizer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "compute-optimizer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "config": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "config.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "config.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "config.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config.us-gov-west-1.amazonaws.com", + }, + }, + }, + "connect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "connect.us-gov-west-1.amazonaws.com", + CredentialScope: 
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "connect.us-gov-west-1.amazonaws.com",
+				},
+			},
+		},
+		"controltower": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "controltower-fips.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-east-1-fips",
+				}: endpoint{
+					Hostname: "controltower-fips.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "controltower-fips.us-gov-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1-fips",
+				}: endpoint{
+					Hostname: "controltower-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"data-ats.iot": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"https"},
+					CredentialScope: credentialScope{
+						Service: "iotdata",
+					},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-gov-east-1",
+				}: endpoint{
+					Hostname: "data.iot-fips.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Service: "iotdata",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "data.iot-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Service: "iotdata",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "data.iot-fips.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "data.iot-fips.us-gov-west-1.amazonaws.com",
+				},
+			},
+		},
+		"data.jobs.iot": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-gov-east-1",
+				}: endpoint{
+					Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com",
+				},
+			},
+		},
+		"databrew": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "databrew.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
"us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew.us-gov-west-1.amazonaws.com", + }, + }, + }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "datazone.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "datazone.us-gov-west-1.api.aws", + }, + }, + }, + "directconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dlm.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "dlm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dlm.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "dlm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "dms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dms", 
+				}: endpoint{
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "dms",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "dms.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "dms-fips",
+				}: endpoint{
+					Hostname: "dms.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "dms.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-east-1-fips",
+				}: endpoint{
+					Hostname: "dms.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "dms.us-gov-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1-fips",
+				}: endpoint{
+					Hostname: "dms.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"docdb": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{
+					Hostname: "rds.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+				},
+			},
+		},
+		"drs": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-gov-east-1",
+				}: endpoint{
+					Hostname: "drs-fips.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "drs-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "drs-fips.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "drs-fips.us-gov-west-1.amazonaws.com",
+				},
+			},
+		},
+		"ds": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-gov-east-1",
+				}: endpoint{
+					Hostname: "ds-fips.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "ds-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ds-fips.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ds-fips.us-gov-west-1.amazonaws.com",
+				},
+			},
+		},
+		"dynamodb": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{},
+				defaultKey{
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "dynamodb.{region}.{dnsSuffix}",
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "dynamodb.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-east-1-fips",
+				}: endpoint{
+					Hostname: "dynamodb.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1-fips",
+				}: endpoint{
+					Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"ebs": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+			},
+		},
+		"ec2": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{},
+				defaultKey{
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ec2.{region}.{dnsSuffix}",
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{
+					Hostname: "ec2.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "ec2.us-gov-east-1.api.aws",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{
+					Hostname: "ec2.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "ec2.us-gov-west-1.api.aws",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+				},
+			},
+		},
+		"ecs": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-gov-east-1",
+				}: endpoint{
+					Hostname: "ecs-fips.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "ecs-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ecs-fips.us-gov-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ecs-fips.us-gov-west-1.amazonaws.com",
+				},
+			},
+		},
+		"eks": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"http", "https"},
+				},
+				defaultKey{
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "eks.{region}.{dnsSuffix}",
+					Protocols: []string{"http", "https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-gov-east-1",
+				}: endpoint{
+					Hostname: "eks.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "eks.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "eks.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "eks.us-gov-west-1.amazonaws.com", + }, + }, + }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "eks-auth.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "eks-auth.us-gov-west-1.api.aws", + }, + }, + }, + "elasticache": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "elasticbeanstalk": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: 
"elasticfilesystem-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, + }, + "email": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "email-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + 
Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "email-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "emr-containers": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "emr-containers.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "emr-containers.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers.us-gov-west-1.amazonaws.com", + }, + }, + }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "emr-serverless.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "emr-serverless.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless.us-gov-west-1.amazonaws.com", + }, + }, + }, + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "es-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + }, + 
endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", + }, + }, + }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "fms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "fms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "fms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-prod-us-gov-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-gov-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: 
"fsx-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-gov-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "geo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "geo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "geo-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "glacier": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "glue-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + 
Variant: dualStackVariant, + }: endpoint{ + Hostname: "glue.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "glue.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-west-1.api.aws", + }, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dataplane-us-gov-east-1", + }: endpoint{ + Hostname: "greengrass-ats.iot.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "dataplane-us-gov-west-1", + }: endpoint{ + Hostname: "greengrass-ats.iot.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "greengrass.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "greengrass.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass.us-gov-west-1.amazonaws.com", + }, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "guardduty.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "health": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: 
"health.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "global.health.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "health-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "health-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global-fips", + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam-govcloud", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam-govcloud", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam-govcloud-fips", + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "identitystore": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "identitystore.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "identitystore.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.us-gov-west-1.amazonaws.com", + }, + }, + }, + "ingest.timestream": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + 
Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "inspector": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "inspector2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "inspector2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "inspector2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "internetmonitor": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "internetmonitor.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "internetmonitor.us-gov-west-1.api.aws", + }, + }, + }, + "iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "iot-fips.us-gov-east-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iot-fips.us-gov-west-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "iotevents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iotevents-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + 
Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "ioteventsdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "data.iotevents-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "data.iotevents.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "iotsitewise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iotsitewise-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "iottwinmaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "api-us-gov-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "data-us-gov-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-api-us-gov-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-data-us-gov-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "kafka": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "kendra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kendra-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kendra-ranking.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kendra-ranking.us-gov-west-1.api.aws", + }, + }, + }, + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "kinesisanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "lakeformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"lakeformation.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lakeformation.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.api.aws", + }, + }, + }, + "lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "license-manager-user-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "logs.us-gov-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "logs.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs.us-gov-west-1.amazonaws.com", + }, + }, + }, + "m2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "managedblockchain": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "mediaconvert": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", + }, + }, + }, + "meetings-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "metering.marketplace": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "mgn": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "mgn-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: 
boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "mgn-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "models-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "monitoring": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "monitoring.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "monitoring.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring.us-gov-west-1.amazonaws.com", + }, + }, + }, + "mq": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "mq-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "mq-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "neptune": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"us-gov-east-1", + }: endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "network-firewall": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "networkmanager": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "networkmanager.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "networkmanager.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-aws-us-gov-global", + }: endpoint{ + Hostname: "networkmanager.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "oidc": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "oidc.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "oidc.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-aws-us-gov-global", + }: endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: 
endpoint{ + Hostname: "outposts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts.us-gov-west-1.amazonaws.com", + }, + }, + }, + "participant.connect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "participant.connect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "participant.connect.us-gov-west-1.amazonaws.com", + }, + }, + }, + "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-gov-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, + }, + }, + "pinpoint": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "pinpoint.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-gov-west-1", + }, + }, + }, + }, + "polly": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "portal.sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "portal.sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "portal.sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "qbusiness": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "qbusiness.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "qbusiness.us-gov-west-1.api.aws", + }, + }, + }, + "quicksight": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "api", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "rbin-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: 
"us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "rds": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rds.us-gov-east-1", + }: endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-gov-west-1", + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "redshift.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "redshift.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "rekognition": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rekognition-fips.us-gov-west-1", + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "resiliencehub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: 
"resiliencehub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "resource-groups": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + }, + }, + }, + "robomaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "rolesanywhere": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-aws-us-gov-global", + }: endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "route53resolver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: 
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "route53resolver.us-gov-east-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-east-1-fips",
+			}: endpoint{
+				Hostname: "route53resolver.us-gov-east-1.amazonaws.com",
+
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "route53resolver.us-gov-west-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-west-1-fips",
+			}: endpoint{
+				Hostname: "route53resolver.us-gov-west-1.amazonaws.com",
+
+				Deprecated: boxedTrue,
+			},
+		},
+	},
+	"runtime-v2-lex": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+		},
+	},
+	"runtime.lex": service{
+		Defaults: endpointDefaults{
+			defaultKey{}: endpoint{
+				CredentialScope: credentialScope{
+					Service: "lex",
+				},
+			},
+			defaultKey{
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "runtime-fips.lex.{region}.{dnsSuffix}",
+				CredentialScope: credentialScope{
+					Service: "lex",
+				},
+			},
+		},
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-west-1-fips",
+			}: endpoint{
+				Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+		},
+	},
+	"runtime.sagemaker": service{
+		Defaults: endpointDefaults{
+			defaultKey{}: endpoint{},
+			defaultKey{
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "runtime.sagemaker.{region}.{dnsSuffix}",
+			},
+		},
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-west-1-fips",
+			}: endpoint{
+				Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+		},
+	},
+	"s3": service{
+		Defaults: endpointDefaults{
+			defaultKey{}: endpoint{
+				SignatureVersions: []string{"s3", "s3v4"},
+			},
+			defaultKey{
+				Variant: dualStackVariant,
+			}: endpoint{
+				Hostname: "{service}.dualstack.{region}.{dnsSuffix}",
+				DNSSuffix: "amazonaws.com",
+				SignatureVersions: []string{"s3", "s3v4"},
+			},
+			defaultKey{
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}",
+				DNSSuffix: "amazonaws.com",
+				SignatureVersions: []string{"s3", "s3v4"},
+			},
+		},
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "fips-us-gov-east-1",
+			}: endpoint{
+				Hostname: "s3-fips.us-gov-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-gov-west-1",
+			}: endpoint{
+				Hostname: "s3-fips.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{
+				Hostname: "s3.us-gov-east-1.amazonaws.com",
+				Protocols: []string{"http", "https"},
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: dualStackVariant,
+			}: endpoint{
Hostname: "s3.dualstack.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "s3-control.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "s3-control.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: 
+				Hostname: "s3-control-fips.dualstack.us-gov-west-1.amazonaws.com",
+				SignatureVersions: []string{"s3v4"},
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+			},
+			endpointKey{
+				Region: "us-gov-west-1-fips",
+			}: endpoint{
+				Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com",
+				SignatureVersions: []string{"s3v4"},
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+		},
+	},
+	"s3-outposts": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "fips-us-gov-east-1",
+			}: endpoint{
+
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-gov-west-1",
+			}: endpoint{
+
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: dualStackVariant,
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: dualStackVariant,
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{},
+		},
+	},
+	"secretsmanager": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: dualStackVariant,
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1-fips",
+			}: endpoint{
+
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: dualStackVariant,
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1-fips",
+			}: endpoint{
+
+				Deprecated: boxedTrue,
+			},
+		},
+	},
+	"securityhub": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "fips-us-gov-east-1",
+			}: endpoint{
+				Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-gov-west-1",
+			}: endpoint{
+				Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com",
+			},
+		},
+	},
+	"securitylake": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "securitylake.us-gov-east-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-east-1-fips",
+			}: endpoint{
+				Hostname: "securitylake.us-gov-east-1.amazonaws.com",
"securitylake.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "securitylake.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "serverlessrepo": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "servicecatalog": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "servicecatalog-appregistry": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "servicediscovery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "servicediscovery", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "servicediscovery", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "servicediscovery-fips", + }: 
+				Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: dualStackVariant,
+			}: endpoint{
+				Hostname: "servicediscovery.us-gov-east-1.api.aws",
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "servicediscovery-fips.us-gov-east-1.api.aws",
+			},
+			endpointKey{
+				Region: "us-gov-east-1-fips",
+			}: endpoint{
+				Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: dualStackVariant,
+			}: endpoint{
+				Hostname: "servicediscovery.us-gov-west-1.api.aws",
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "servicediscovery-fips.us-gov-west-1.api.aws",
+			},
+			endpointKey{
+				Region: "us-gov-west-1-fips",
+			}: endpoint{
+				Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+		},
+	},
+	"servicequotas": service{
+		Defaults: endpointDefaults{
+			defaultKey{}: endpoint{
+				Protocols: []string{"https"},
+			},
+			defaultKey{
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "servicequotas.{region}.{dnsSuffix}",
+				Protocols: []string{"https"},
+			},
+		},
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "fips-us-gov-east-1",
+			}: endpoint{
+				Hostname: "servicequotas.us-gov-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-gov-west-1",
+			}: endpoint{
+				Hostname: "servicequotas.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "servicequotas.us-gov-east-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "servicequotas.us-gov-west-1.amazonaws.com",
+			},
+		},
+	},
+	"signer": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "fips-us-gov-east-1",
+			}: endpoint{
+				Hostname: "signer-fips.us-gov-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-gov-west-1",
+			}: endpoint{
+				Hostname: "signer-fips.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-verification-us-gov-east-1",
+			}: endpoint{
+				Hostname: "verification.signer-fips.us-gov-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-east-1",
+				},
+			},
+			endpointKey{
+				Region: "fips-verification-us-gov-west-1",
"fips-verification-us-gov-west-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "verification-us-gov-east-1", + }: endpoint{ + Hostname: "verification.signer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "verification-us-gov-west-1", + }: endpoint{ + Hostname: "verification.signer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "simspaceweaver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com", + }, + }, + }, + "sms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "sms-voice": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "snowball": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: 
+			},
+			endpointKey{
+				Region: "fips-us-gov-west-1",
+			}: endpoint{
+				Hostname: "snowball-fips.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "snowball-fips.us-gov-east-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "snowball-fips.us-gov-west-1.amazonaws.com",
+			},
+		},
+	},
+	"sns": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "fips-us-gov-east-1",
+			}: endpoint{
+				Hostname: "sns.us-gov-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-gov-west-1",
+			}: endpoint{
+				Hostname: "sns.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "sns.us-gov-east-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{
+				Protocols: []string{"https"},
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "sns.us-gov-west-1.amazonaws.com",
+				Protocols: []string{"https"},
+			},
+		},
+	},
+	"sqs": service{
+		Defaults: endpointDefaults{
+			defaultKey{}: endpoint{},
+			defaultKey{
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "sqs.{region}.{dnsSuffix}",
+			},
+		},
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{
+				Hostname: "sqs.us-gov-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-east-1",
+				},
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{
+				Hostname: "sqs.us-gov-west-1.amazonaws.com",
+				SSLCommonName: "{region}.queue.{dnsSuffix}",
+				Protocols: []string{"http", "https"},
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+			},
+		},
+	},
+	"ssm": service{
+		Defaults: endpointDefaults{
+			defaultKey{}: endpoint{},
+			defaultKey{
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "ssm.{region}.{dnsSuffix}",
+			},
+		},
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "fips-us-gov-east-1",
+			}: endpoint{
+				Hostname: "ssm.us-gov-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-gov-west-1",
+			}: endpoint{
+				Hostname: "ssm.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "ssm.us-gov-east-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "ssm.us-gov-west-1.amazonaws.com",
+			},
+		},
+	},
+	"sso": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{
+				Hostname: "sso.us-gov-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-east-1",
+				},
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "sso.us-gov-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-east-1",
+				},
+			},
+			endpointKey{
+				Region: "us-gov-east-1-fips",
+			}: endpoint{
+				Hostname: "sso.us-gov-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{
+				Hostname: "sso.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "sso.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+			},
+			endpointKey{
+				Region: "us-gov-west-1-fips",
+			}: endpoint{
+				Hostname: "sso.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+		},
+	},
+	"states": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "fips-us-gov-east-1",
+			}: endpoint{
+				Hostname: "states-fips.us-gov-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-gov-west-1",
+			}: endpoint{
+				Hostname: "states.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "states-fips.us-gov-east-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "states.us-gov-west-1.amazonaws.com",
+			},
+		},
+	},
+	"storagegateway": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "fips",
+			}: endpoint{
+				Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-east-1-fips",
+			}: endpoint{
+				Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-west-1-fips",
+			}: endpoint{
+				Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+		},
+	},
+	"streams.dynamodb": service{
+		Defaults: endpointDefaults{
+			defaultKey{}: endpoint{
+				CredentialScope: credentialScope{
+					Service: "dynamodb",
+				},
+			},
+			defaultKey{
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "streams.dynamodb.{region}.{dnsSuffix}",
+				CredentialScope: credentialScope{
+					Service: "dynamodb",
+				},
+			},
+		},
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-east-1-fips",
"us-gov-east-1-fips", + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "sts": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-us-gov-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "swf": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + 
+			endpointKey{
+				Region: "us-gov-west-1-fips",
+			}: endpoint{
+				Hostname: "swf.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+		},
+	},
+	"synthetics": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "fips-us-gov-east-1",
+			}: endpoint{
+				Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-gov-west-1",
+			}: endpoint{
+				Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com",
+			},
+		},
+	},
+	"tagging": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+		},
+	},
+	"textract": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "fips-us-gov-east-1",
+			}: endpoint{
+				Hostname: "textract-fips.us-gov-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-gov-west-1",
+			}: endpoint{
+				Hostname: "textract-fips.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: dualStackVariant,
+			}: endpoint{
+				Hostname: "textract.us-gov-east-1.api.aws",
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "textract-fips.us-gov-east-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "textract-fips.us-gov-east-1.api.aws",
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: dualStackVariant,
+			}: endpoint{
+				Hostname: "textract.us-gov-west-1.api.aws",
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "textract-fips.us-gov-west-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant | dualStackVariant,
+			}: endpoint{
+				Hostname: "textract-fips.us-gov-west-1.api.aws",
+			},
+		},
+	},
+	"transcribe": service{
+		Defaults: endpointDefaults{
+			defaultKey{}: endpoint{
+				Protocols: []string{"https"},
+			},
+			defaultKey{
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "fips.transcribe.{region}.{dnsSuffix}",
+				Protocols: []string{"https"},
+			},
+		},
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "fips-us-gov-east-1",
+			}: endpoint{
+				Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-gov-west-1",
+			}: endpoint{
+				Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
"us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", + }, + }, + }, + "transcribestreaming": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "transfer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "translate": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "verifiedpermissions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "waf-regional": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"us-gov-east-1", + }: endpoint{ + Hostname: "waf-regional.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "waf-regional.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "wafv2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "wafv2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "wafv2.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "wafv2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "wellarchitected": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "xray": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "xray-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "xray-fips.us-gov-west-1.amazonaws.com", + CredentialScope: 
+					Region: "us-gov-west-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "xray-fips.us-gov-east-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "xray-fips.us-gov-west-1.amazonaws.com",
+			},
+		},
+	},
+	},
+}
+
+// AwsIsoPartition returns the Resolver for AWS ISO (US).
+func AwsIsoPartition() Partition {
+	return awsisoPartition.Partition()
+}
+
+var awsisoPartition = partition{
+	ID: "aws-iso",
+	Name: "AWS ISO (US)",
+	DNSSuffix: "c2s.ic.gov",
+	RegionRegex: regionRegex{
+		Regexp: func() *regexp.Regexp {
+			reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$")
+			return reg
+		}(),
+	},
+	Defaults: endpointDefaults{
+		defaultKey{}: endpoint{
+			Hostname: "{service}.{region}.{dnsSuffix}",
+			Protocols: []string{"https"},
+			SignatureVersions: []string{"v4"},
+		},
+		defaultKey{
+			Variant: fipsVariant,
+		}: endpoint{
+			Hostname: "{service}-fips.{region}.{dnsSuffix}",
+			DNSSuffix: "c2s.ic.gov",
+			Protocols: []string{"https"},
+			SignatureVersions: []string{"v4"},
+		},
+	},
+	Regions: regions{
+		"us-iso-east-1": region{
+			Description: "US ISO East",
+		},
+		"us-iso-west-1": region{
+			Description: "US ISO WEST",
+		},
+	},
+	Services: services{
+	"api.ecr": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-iso-east-1",
+			}: endpoint{
+				Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov",
+				CredentialScope: credentialScope{
+					Region: "us-iso-east-1",
+				},
+			},
+			endpointKey{
+				Region: "us-iso-west-1",
+			}: endpoint{
+				Hostname: "api.ecr.us-iso-west-1.c2s.ic.gov",
+				CredentialScope: credentialScope{
+					Region: "us-iso-west-1",
+				},
+			},
+		},
+	},
+	"api.pricing": service{
+		Defaults: endpointDefaults{
+			defaultKey{}: endpoint{
+				CredentialScope: credentialScope{
+					Service: "pricing",
+				},
+			},
+		},
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-iso-east-1",
+			}: endpoint{},
+		},
+	},
+	"api.sagemaker": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-iso-east-1",
+			}: endpoint{},
+		},
+	},
+	"apigateway": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-iso-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-iso-west-1",
+			}: endpoint{},
+		},
+	},
+	"appconfig": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-iso-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-iso-west-1",
+			}: endpoint{},
+		},
+	},
+	"appconfigdata": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-iso-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-iso-west-1",
+			}: endpoint{},
+		},
+	},
+	"application-autoscaling": service{
+		Defaults: endpointDefaults{
+			defaultKey{}: endpoint{
+				Protocols: []string{"http", "https"},
+			},
+		},
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-iso-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-iso-west-1",
+			}: endpoint{},
+		},
+	},
+	"arc-zonal-shift": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-iso-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "us-iso-west-1",
+			}: endpoint{},
+		},
+	},
+	"athena": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-iso-east-1",
+			}: endpoint{},
+		},
+	},
+	"autoscaling": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-iso-east-1",
+			}: endpoint{
+				Protocols: []string{"http", "https"},
"https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "cloudformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "cloudtrail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "codedeploy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "datapipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov", + }, + }, + }, + "directconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "dms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"dms.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "dms.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "ds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "dynamodb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "ec2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "ecs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "elasticache": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov", + }, + }, + }, + "elasticloadbalancing": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "elasticmapreduce": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: 
endpoint{ + Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov", + }, + }, + }, + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-prod-us-iso-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-iso-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + }, + }, + }, + "glacier": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "health": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-global", + }: endpoint{ + Hostname: "iam.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + 
Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "medialive": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "mediapackage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "monitoring": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov", + }, + }, + }, + "rds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rds.us-iso-east-1", + }: endpoint{ + Hostname: "rds.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-west-1", + }: 
endpoint{ + Hostname: "rds.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "rds.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "rds.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Hostname: "redshift.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{ + Hostname: "redshift.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + }, + }, + "resource-groups": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-global", + }: endpoint{ + Hostname: "route53.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "route53resolver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "runtime.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "s3": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + 
Region: "us-iso-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov", + }, + }, + }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Hostname: "s3-control.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{ + Hostname: "s3-control.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "snowball": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, 
+ "sns": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "sqs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "ssm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "sts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-global", + }: endpoint{ + Hostname: "support.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "swf": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "textract": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "transcribestreaming": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "translate": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + }, +} + +// AwsIsoBPartition returns the Resolver for AWS ISOB (US). 
+func AwsIsoBPartition() Partition { + return awsisobPartition.Partition() +} + +var awsisobPartition = partition{ + ID: "aws-iso-b", + Name: "AWS ISOB (US)", + DNSSuffix: "sc2s.sgov.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{ + "us-isob-east-1": region{ + Description: "US ISOB East (Ohio)", + }, + }, + Services: services{ + "api.ecr": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{ + Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "api.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "apigateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "cloudformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "cloudtrail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "codedeploy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "directconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "dms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + 
endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "ds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ecs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "elasticache": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, + "elasticloadbalancing": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "elasticmapreduce": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: 
endpoint{}, + }, + }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "glacier": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "health": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-b-global", + }: endpoint{ + Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "medialive": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "mediapackage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "monitoring": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, + "rds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rds.us-isob-east-1", + }: endpoint{ + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", + 
CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{ + Hostname: "redshift.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "resource-groups": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-b-global", + }: endpoint{ + Hostname: "route53.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "route53resolver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "runtime.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "s3": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{ + Hostname: "s3-control.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: 
"s3-control-fips.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "snowball": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "sns": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ssm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "storagegateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "streams.dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "sts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-b-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-b-global", + }: endpoint{ + Hostname: "support.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "swf": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + }, +} + +// AwsIsoEPartition returns the Resolver for AWS ISOE (Europe). 
+func AwsIsoEPartition() Partition { + return awsisoePartition.Partition() +} + +var awsisoePartition = partition{ + ID: "aws-iso-e", + Name: "AWS ISOE (Europe)", + DNSSuffix: "cloud.adc-e.uk", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^eu\\-isoe\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{ + "eu-isoe-west-1": region{ + Description: "EU ISOE West", + }, + }, + Services: services{}, +} + +// AwsIsoFPartition returns the Resolver for AWS ISOF. +func AwsIsoFPartition() Partition { + return awsisofPartition.Partition() +} + +var awsisofPartition = partition{ + ID: "aws-iso-f", + Name: "AWS ISOF", + DNSSuffix: "csp.hci.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isof\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{}, + Services: services{}, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go new file mode 100644 index 000000000..ca8fc828e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go @@ -0,0 +1,141 @@ +package endpoints + +// Service identifiers +// +// Deprecated: Use client package's EndpointsID value instead of these +// ServiceIDs. These IDs are not maintained, and are out of date. +const ( + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AppsyncServiceID = "appsync" // Appsync. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + ChimeServiceID = "chime" // Chime. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. 
+ CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. 
+	RedshiftServiceID = "redshift" // Redshift.
+	RekognitionServiceID = "rekognition" // Rekognition.
+	ResourceGroupsServiceID = "resource-groups" // ResourceGroups.
+	Route53ServiceID = "route53" // Route53.
+	Route53domainsServiceID = "route53domains" // Route53domains.
+	RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
+	RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
+	S3ServiceID = "s3" // S3.
+	S3ControlServiceID = "s3-control" // S3Control.
+	SagemakerServiceID = "api.sagemaker" // Sagemaker.
+	SdbServiceID = "sdb" // Sdb.
+	SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
+	ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
+	ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
+	ServicediscoveryServiceID = "servicediscovery" // Servicediscovery.
+	ShieldServiceID = "shield" // Shield.
+	SmsServiceID = "sms" // Sms.
+	SnowballServiceID = "snowball" // Snowball.
+	SnsServiceID = "sns" // Sns.
+	SqsServiceID = "sqs" // Sqs.
+	SsmServiceID = "ssm" // Ssm.
+	StatesServiceID = "states" // States.
+	StoragegatewayServiceID = "storagegateway" // Storagegateway.
+	StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb.
+	StsServiceID = "sts" // Sts.
+	SupportServiceID = "support" // Support.
+	SwfServiceID = "swf" // Swf.
+	TaggingServiceID = "tagging" // Tagging.
+	TransferServiceID = "transfer" // Transfer.
+	TranslateServiceID = "translate" // Translate.
+	WafServiceID = "waf" // Waf.
+	WafRegionalServiceID = "waf-regional" // WafRegional.
+	WorkdocsServiceID = "workdocs" // Workdocs.
+	WorkmailServiceID = "workmail" // Workmail.
+	WorkspacesServiceID = "workspaces" // Workspaces.
+	XrayServiceID = "xray" // Xray.
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
new file mode 100644
index 000000000..66dec6beb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
@@ -0,0 +1,65 @@
+// Package endpoints provides the types and functionality for defining regions
+// and endpoints, as well as querying those definitions.
+//
+// The SDK's Regions and Endpoints metadata is code generated into the endpoints
+// package, and is accessible via the DefaultResolver function. This function
+// returns an endpoint Resolver that will search the metadata and build an
+// associated endpoint if one is found. The default resolver will search all
+// partitions known by the SDK, e.g. AWS Standard (aws), AWS China (aws-cn),
+// and AWS GovCloud (US) (aws-us-gov).
+//
+// # Enumerating Regions and Endpoint Metadata
+//
+// Casting the Resolver returned by DefaultResolver to an EnumPartitions
+// interface allows you to access the list of underlying Partitions with the
+// Partitions method. This is helpful if you want to limit the SDK's endpoint
+// resolving to a single partition, or enumerate regions, services, and endpoints
+// in the partition.
+//
+//	resolver := endpoints.DefaultResolver()
+//	partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//
+//	for _, p := range partitions {
+//		fmt.Println("Regions for", p.ID())
+//		for id, _ := range p.Regions() {
+//			fmt.Println("*", id)
+//		}
+//
+//		fmt.Println("Services for", p.ID())
+//		for id, _ := range p.Services() {
+//			fmt.Println("*", id)
+//		}
+//	}
+//
+// # Using Custom Endpoints
+//
+// The endpoints package also gives you the ability to use your own logic for
+// how endpoints are resolved. This is a great way to define a custom endpoint
+// for select services, without passing that logic down through your code.
+//
+// If a type implements the Resolver interface it can be used to resolve
+// endpoints. To use this with the SDK's Session and Config, set the value
+// of the type to the EndpointResolver field of aws.Config when initializing
+// the session, or service client.
+//
+// In addition, the ResolverFunc is a wrapper for a func matching the signature
+// of Resolver.EndpointFor, converting it to a type that satisfies the
+// Resolver interface.
+//
+//	myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+//		if service == endpoints.S3ServiceID {
+//			return endpoints.ResolvedEndpoint{
+//				URL:           "s3.custom.endpoint.com",
+//				SigningRegion: "custom-signing-region",
+//			}, nil
+//		}
+//
+//		return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+//	}
+//
+//	sess := session.Must(session.NewSession(&aws.Config{
+//		Region:           aws.String("us-west-2"),
+//		EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
+//	}))
+package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
new file mode 100644
index 000000000..a686a48fa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -0,0 +1,708 @@
+package endpoints
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to.
+type Logger interface {
+	Log(...interface{})
+}
+
+// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution
+// behavior.
+type DualStackEndpointState uint
+
+const (
+	// DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint
+	// resolution.
+	DualStackEndpointStateUnset DualStackEndpointState = iota
+
+	// DualStackEndpointStateEnabled enables dual-stack endpoint resolution for endpoints.
+	DualStackEndpointStateEnabled
+
+	// DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints.
+	DualStackEndpointStateDisabled
+)
+
+// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior.
+type FIPSEndpointState uint
+
+const (
+	// FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution.
+	FIPSEndpointStateUnset FIPSEndpointState = iota
+
+	// FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints.
+	FIPSEndpointStateEnabled
+
+	// FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints.
+	FIPSEndpointStateDisabled
+)
+
+// Options provides the configuration needed to direct how the
+// endpoints will be resolved.
+type Options struct {
+	// DisableSSL forces the endpoint to be resolved as HTTP
+	// instead of HTTPS if the service supports it.
+	DisableSSL bool
+
+	// Sets the resolver to resolve the endpoint as a dualstack endpoint
+	// for the service. If dualstack support for a service is not known and
+	// StrictMatching is not enabled a dualstack endpoint for the service will
+	// be returned. This endpoint may not be valid. If StrictMatching is
+	// enabled only services that are known to support dualstack will return
+	// dualstack endpoints.
+	//
+	// Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility.
+	// UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients
+	// moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher
+	// precedence than this option.
+	UseDualStack bool
+
+	// Sets the resolver to resolve a dual-stack endpoint for the service.
+	UseDualStackEndpoint DualStackEndpointState
+
+	// UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+	UseFIPSEndpoint FIPSEndpointState
+
+	// Enables strict matching of services and regions when resolving
+	// endpoints. If the partition doesn't enumerate the exact service and
+	// region an error will be returned. This option will prevent returning
+	// endpoints that look valid, but may not resolve to any real endpoint.
+	StrictMatching bool
+
+	// Enables resolving a service endpoint based on the region provided if the
+	// service does not exist. The service endpoint ID will be used as the service
+	// domain name prefix. By default, the endpoint resolver requires the service
+	// to be known when resolving endpoints.
+	//
+	// If resolving an endpoint on the partition list, the provided region will
+	// be used to determine which partition's domain name pattern to combine
+	// with the service endpoint ID. If both the service and region are unknown
+	// when resolving the endpoint on the partition list, an UnknownEndpointError
+	// will be returned.
+	//
+	// If resolving an endpoint on a partition-specific resolver, that partition's
+	// domain name pattern will be used with the service endpoint ID. If both
+	// region and service do not exist when resolving an endpoint on a specific
+	// partition, the partition's domain pattern will be used to combine the
+	// endpoint and region together.
+	//
+	// This option is ignored if StrictMatching is enabled.
+	ResolveUnknownService bool
+
+	// Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6).
+	EC2MetadataEndpointMode EC2IMDSEndpointModeState
+
+	// STS Regional Endpoint flag helps with resolving the STS endpoint.
+	STSRegionalEndpoint STSRegionalEndpoint
+
+	// S3 Regional Endpoint flag helps with resolving the S3 endpoint.
+	S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint
+
+	// ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority
+	// over the region name passed to the ResolveEndpoint call.
+	ResolvedRegion string
+
+	// Logger is the logger that will be used to log messages.
+	Logger Logger
+
+	// Determines whether logging of deprecated endpoints usage is enabled.
+	LogDeprecated bool
+}
+
+func (o Options) getEndpointVariant(service string) (v endpointVariant) {
+	const s3 = "s3"
+	const s3Control = "s3-control"
+
+	if (o.UseDualStackEndpoint == DualStackEndpointStateEnabled) ||
+		((service == s3 || service == s3Control) && (o.UseDualStackEndpoint == DualStackEndpointStateUnset && o.UseDualStack)) {
+		v |= dualStackVariant
+	}
+	if o.UseFIPSEndpoint == FIPSEndpointStateEnabled {
+		v |= fipsVariant
+	}
+	return v
+}
+
+// EC2IMDSEndpointModeState is an enum configuration variable describing the client endpoint mode.
+type EC2IMDSEndpointModeState uint
+
+// Enumeration values for EC2IMDSEndpointModeState
+const (
+	EC2IMDSEndpointModeStateUnset EC2IMDSEndpointModeState = iota
+	EC2IMDSEndpointModeStateIPv4
+	EC2IMDSEndpointModeStateIPv6
+)
+
+// SetFromString sets the EC2IMDSEndpointModeState based on the provided string
+// value. Unknown values will default to EC2IMDSEndpointModeStateUnset.
+func (e *EC2IMDSEndpointModeState) SetFromString(v string) error {
+	v = strings.TrimSpace(v)
+
+	switch {
+	case len(v) == 0:
+		*e = EC2IMDSEndpointModeStateUnset
+	case strings.EqualFold(v, "IPv6"):
+		*e = EC2IMDSEndpointModeStateIPv6
+	case strings.EqualFold(v, "IPv4"):
+		*e = EC2IMDSEndpointModeStateIPv4
+	default:
+		return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4")
+	}
+	return nil
+}
+
+// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint
+// options.
+type STSRegionalEndpoint int
+
+func (e STSRegionalEndpoint) String() string {
+	switch e {
+	case LegacySTSEndpoint:
+		return "legacy"
+	case RegionalSTSEndpoint:
+		return "regional"
+	case UnsetSTSEndpoint:
+		return ""
+	default:
+		return "unknown"
+	}
+}
+
+const (
+
+	// UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified.
+	UnsetSTSEndpoint STSRegionalEndpoint = iota
+
+	// LegacySTSEndpoint represents when STS Regional Endpoint flag is specified
+	// to use legacy endpoints.
+	LegacySTSEndpoint
+
+	// RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified
+	// to use regional endpoints.
+	RegionalSTSEndpoint
+)
+
+// GetSTSRegionalEndpoint returns the STSRegionalEndpoint flag based
+// on the input string provided in env config or shared config by the user.
+//
+// `legacy` and `regional` are the only valid, case-insensitive strings for
+// resolving the STS regional endpoint flag.
+func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) {
+	switch {
+	case strings.EqualFold(s, "legacy"):
+		return LegacySTSEndpoint, nil
+	case strings.EqualFold(s, "regional"):
+		return RegionalSTSEndpoint, nil
+	default:
+		return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s)
+	}
+}
+
+// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1
+// Regional Endpoint options.
+type S3UsEast1RegionalEndpoint int
+
+func (e S3UsEast1RegionalEndpoint) String() string {
+	switch e {
+	case LegacyS3UsEast1Endpoint:
+		return "legacy"
+	case RegionalS3UsEast1Endpoint:
+		return "regional"
+	case UnsetS3UsEast1Endpoint:
+		return ""
+	default:
+		return "unknown"
+	}
+}
+
+const (
+
+	// UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not
+	// specified.
+	UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota
+
+	// LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is
+	// specified to use legacy endpoints.
+	LegacyS3UsEast1Endpoint
+
+	// RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is
+	// specified to use regional endpoints.
+	RegionalS3UsEast1Endpoint
+)
+
+// GetS3UsEast1RegionalEndpoint returns the S3UsEast1RegionalEndpoint flag based
+// on the input string provided in env config or shared config by the user.
+//
+// `legacy` and `regional` are the only valid, case-insensitive strings for
+// resolving the S3 regional endpoint flag.
+func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) {
+	switch {
+	case strings.EqualFold(s, "legacy"):
+		return LegacyS3UsEast1Endpoint, nil
+	case strings.EqualFold(s, "regional"):
+		return RegionalS3UsEast1Endpoint, nil
+	default:
+		return UnsetS3UsEast1Endpoint,
+			fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s)
+	}
+}
+
+// Set combines all of the option functions together.
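+//
+// A minimal usage sketch (editor's illustration, not part of the vendored
+// source):
+//
+//	var o Options
+//	o.Set(UseFIPSEndpointOption, StrictMatchingOption)
+//	// o.UseFIPSEndpoint is now FIPSEndpointStateEnabled and
+//	// o.StrictMatching is true.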
+func (o *Options) Set(optFns ...func(*Options)) {
+	for _, fn := range optFns {
+		fn(o)
+	}
+}
+
+// DisableSSLOption sets the DisableSSL option. Can be used as a functional
+// option when resolving endpoints.
+func DisableSSLOption(o *Options) {
+	o.DisableSSL = true
+}
+
+// UseDualStackOption sets the UseDualStack option. Can be used as a functional
+// option when resolving endpoints.
+//
+// Deprecated: UseDualStackEndpointOption should be used to enable usage of a service's dual-stack endpoint.
+// When DualStackEndpointState is set to a non-zero value it takes higher precedence than this option.
+func UseDualStackOption(o *Options) {
+	o.UseDualStack = true
+}
+
+// UseDualStackEndpointOption sets the UseDualStackEndpoint option to enabled. Can be used as a functional
+// option when resolving endpoints.
+func UseDualStackEndpointOption(o *Options) {
+	o.UseDualStackEndpoint = DualStackEndpointStateEnabled
+}
+
+// UseFIPSEndpointOption sets the UseFIPSEndpoint option to enabled. Can be used as a functional
+// option when resolving endpoints.
+func UseFIPSEndpointOption(o *Options) {
+	o.UseFIPSEndpoint = FIPSEndpointStateEnabled
+}
+
+// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
+// option when resolving endpoints.
+func StrictMatchingOption(o *Options) {
+	o.StrictMatching = true
+}
+
+// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
+// as a functional option when resolving endpoints.
+func ResolveUnknownServiceOption(o *Options) {
+	o.ResolveUnknownService = true
+}
+
+// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve
+// STS endpoints to their regional endpoint, instead of the global endpoint.
+func STSRegionalEndpointOption(o *Options) {
+	o.STSRegionalEndpoint = RegionalSTSEndpoint
+}
+
+// A Resolver provides the interface for functionality to resolve endpoints.
+// The built-in Partition and DefaultResolver return values satisfy this interface.
+type Resolver interface {
+	EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+}
+
+// ResolverFunc is a helper utility that wraps a function so it satisfies the
+// Resolver interface. This is useful when you want to add additional endpoint
+// resolving logic, or stub out specific endpoints with custom values.
+type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+
+// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
+func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return fn(service, region, opts...)
+}
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS schemes to an endpoint URL if there is no
+// scheme. If disableSSL is true, HTTP will be used instead of the default HTTPS.
+//
+// If disableSSL is set, it will only set the URL's scheme if the URL does not
+// contain a scheme.
+func AddScheme(endpoint string, disableSSL bool) string {
+	if !schemeRE.MatchString(endpoint) {
+		scheme := "https"
+		if disableSSL {
+			scheme = "http"
+		}
+		endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+	}
+
+	return endpoint
+}
+
+// EnumPartitions provides a way to retrieve the underlying partitions that
+// make up the SDK's default Resolver, or any resolver decoded from a model
+// file.
+//
+// Use this interface with DefaultResolver and DecodeModels to get the list of
+// Partitions.
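+//
+// For example (editor's sketch, not part of the vendored source):
+//
+//	resolver := DefaultResolver()
+//	for _, p := range resolver.(EnumPartitions).Partitions() {
+//		fmt.Println(p.ID(), p.DNSSuffix())
+//	}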
+type EnumPartitions interface {
+	Partitions() []Partition
+}
+
+// RegionsForService returns a map of regions for the partition and service.
+// If either the partition or service does not exist, false will be returned
+// as the second parameter.
+//
+// This example shows how to get the regions for DynamoDB in the AWS partition.
+//
+// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+// This is equivalent to using the partition directly.
+//
+// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
+	for _, p := range ps {
+		if p.ID() != partitionID {
+			continue
+		}
+		if _, ok := p.p.Services[serviceID]; !(ok || serviceID == Ec2metadataServiceID) {
+			break
+		}
+
+		s := Service{
+			id: serviceID,
+			p:  p.p,
+		}
+		return s.Regions(), true
+	}
+
+	return map[string]Region{}, false
+}
+
+// PartitionForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
+func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
+	for _, p := range ps {
+		if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
+			return p, true
+		}
+	}
+
+	return Partition{}, false
+}
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+	id, dnsSuffix string
+	p             *partition
+}
+
+// DNSSuffix returns the base domain name of the partition.
+func (p Partition) DNSSuffix() string { return p.dnsSuffix }
+
+// ID returns the identifier of the partition.
+func (p Partition) ID() string { return p.id }
+
+// EndpointFor attempts to resolve the endpoint based on service and region.
+// See Options for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata the UnknownServiceError
+// error will be returned. This validation will occur regardless of whether
+// StrictMatching is enabled. To enable resolving unknown services set the
+// "ResolveUnknownService" option to true. When StrictMatching is disabled
+// this option allows the partition resolver to resolve an endpoint based on
+// the service endpoint ID provided.
+//
+// When resolving endpoints you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved, an error will be returned. This
+// mode is useful to ensure the endpoint resolved is valid. Without
+// StrictMatching enabled the endpoint returned may look valid but may not work.
+// StrictMatching requires the SDK to be updated if you want to take advantage
+// of new region and service expansions.
+//
+// Errors that can be returned:
+//   - UnknownServiceError
+//   - UnknownEndpointError
+func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return p.p.EndpointFor(service, region, opts...)
+}
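A short sketch of the two lookups above; DefaultPartitions and AwsPartitionID come from the generated defaults vendored with this file, and the service ID and region strings are illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Every region S3 is modeled in within the AWS partition.
	rs, ok := endpoints.RegionsForService(
		endpoints.DefaultPartitions(), endpoints.AwsPartitionID, "s3",
	)
	fmt.Println(ok, len(rs) > 0) // true true

	// Regex matching lets plausible-but-unlisted regions still map to a
	// partition; explicitly modeled ones match directly.
	p, ok := endpoints.PartitionForRegion(endpoints.DefaultPartitions(), "eu-west-1")
	fmt.Println(p.ID(), ok) // aws true
}

+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.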
+func (p Partition) Regions() map[string]Region {
+	rs := make(map[string]Region, len(p.p.Regions))
+	for id, r := range p.p.Regions {
+		rs[id] = Region{
+			id:   id,
+			desc: r.Description,
+			p:    p.p,
+		}
+	}
+
+	return rs
+}
+
+// Services returns a map of Services indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p Partition) Services() map[string]Service {
+	ss := make(map[string]Service, len(p.p.Services))
+
+	for id := range p.p.Services {
+		ss[id] = Service{
+			id: id,
+			p:  p.p,
+		}
+	}
+
+	// Since we have removed the customization that injected this into the model
+	// we still need to pretend that this is a modeled service.
+	if _, ok := ss[Ec2metadataServiceID]; !ok {
+		ss[Ec2metadataServiceID] = Service{
+			id: Ec2metadataServiceID,
+			p:  p.p,
+		}
+	}
+
+	return ss
+}
+
+// A Region provides information about a region, and the ability to resolve an
+// endpoint from the context of a region, given a service.
+type Region struct {
+	id, desc string
+	p        *partition
+}
+
+// ID returns the region's identifier.
+func (r Region) ID() string { return r.id }
+
+// Description returns the region's description. The region description
+// is free text, it can be empty, and it may change between SDK releases.
+func (r Region) Description() string { return r.desc }
+
+// ResolveEndpoint resolves an endpoint from the context of the region given
+// a service. See Partition.EndpointFor for usage and errors that can be returned.
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return r.p.EndpointFor(service, r.id, opts...)
+}
+
+// Services returns a list of all services that are known to be in this region.
+func (r Region) Services() map[string]Service {
+	ss := map[string]Service{}
+	for id, s := range r.p.Services {
+		if _, ok := s.Endpoints[endpointKey{Region: r.id}]; ok {
+			ss[id] = Service{
+				id: id,
+				p:  r.p,
+			}
+		}
+	}
+
+	return ss
+}
+
+// A Service provides information about a service, and the ability to resolve an
+// endpoint from the context of a service, given a region.
+type Service struct {
+	id string
+	p  *partition
+}
+
+// ID returns the identifier for the service.
+func (s Service) ID() string { return s.id }
+
+// ResolveEndpoint resolves an endpoint from the context of a service given
+// a region. See Partition.EndpointFor for usage and errors that can be returned.
+func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return s.p.EndpointFor(s.id, region, opts...)
+}
+
+// Regions returns a map of Regions that the service is present in.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Regions() map[string]Region {
+	rs := map[string]Region{}
+
+	service, ok := s.p.Services[s.id]
+
+	// Since the ec2metadata customization has been removed, we need to check
+	// whether it was defined in a non-standard endpoints.json file. If it was
+	// not, we can return the empty map, as there are no regional endpoints for
+	// IMDS. Otherwise, we need to iterate the non-standard model.
+	if s.id == Ec2metadataServiceID && !ok {
+		return rs
+	}
+
+	for id := range service.Endpoints {
+		if id.Variant != 0 {
+			continue
+		}
+		if r, ok := s.p.Regions[id.Region]; ok {
+			rs[id.Region] = Region{
+				id:   id.Region,
+				desc: r.Description,
+				p:    s.p,
+			}
+		}
+	}
+
+	return rs
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Endpoints() map[string]Endpoint {
+	es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints))
+	for id := range s.p.Services[s.id].Endpoints {
+		if id.Variant != 0 {
+			continue
+		}
+		es[id.Region] = Endpoint{
+			id:        id.Region,
+			serviceID: s.id,
+			p:         s.p,
+		}
+	}
+
+	return es
+}
+
+// An Endpoint provides information about an endpoint, and provides the ability
+// to resolve that endpoint for the service, and the region the endpoint
+// represents.
+type Endpoint struct {
+	id        string
+	serviceID string
+	p         *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier the endpoint belongs to.
+func (e Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a
+// partition, service, and region.
+type ResolvedEndpoint struct {
+	// The endpoint URL
+	URL string
+
+	// The endpoint partition
+	PartitionID string
+
+	// The region that should be used for signing requests.
+	SigningRegion string
+
+	// The service name that should be used for signing requests.
+	SigningName string
+
+	// States that the signing name for this endpoint was derived from metadata
+	// passed in, but was not explicitly modeled.
+	SigningNameDerived bool
+
+	// The signing method that should be used for signing requests.
+	SigningMethod string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when in StrictMatching mode, and the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+	awsError
+	Partition string
+	Service   string
+	Region    string
+}
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+	awsError
+	Partition string
+	Service   string
+	Known     []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+	return UnknownServiceError{
+		awsError: awserr.New("UnknownServiceError",
+			"could not resolve endpoint for unknown service", nil),
+		Partition: p,
+		Service:   s,
+		Known:     known,
+	}
+}
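Resolving an unknown service surfaces this error type as a plain value, so a type assertion recovers the details. A minimal sketch, under the same import-path assumption and with an illustrative service name:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	_, err := endpoints.DefaultResolver().EndpointFor("no-such-service", "us-east-1")

	// Known carries every service ID the partition does support, which is
	// useful when reporting the failure to a user.
	if e, ok := err.(endpoints.UnknownServiceError); ok {
		fmt.Println(e.Partition, e.Service, len(e.Known))
	}
}

+// String returns the string representation of the error.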
+func (e UnknownServiceError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q", + e.Partition, e.Service) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownServiceError) String() string { + return e.Error() +} + +// A UnknownEndpointError is returned when in StrictMatching mode and the +// service is valid, but the region does not resolve to an endpoint. Includes +// a list of all known endpoints for the service. +type UnknownEndpointError struct { + awsError + Partition string + Service string + Region string + Known []string +} + +// NewUnknownEndpointError builds and returns UnknownEndpointError. +func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError { + return UnknownEndpointError{ + awsError: awserr.New("UnknownEndpointError", + "could not resolve endpoint", nil), + Partition: p, + Service: s, + Region: r, + Known: known, + } +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q, region: %q", + e.Partition, e.Service, e.Region) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) String() string { + return e.Error() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go new file mode 100644 index 000000000..df75e899a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go @@ -0,0 +1,24 @@ +package endpoints + +var legacyGlobalRegions = map[string]map[string]struct{}{ + "sts": { + "ap-northeast-1": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-north-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "eu-west-3": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {}, + }, + "s3": { + "us-east-1": {}, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go new file mode 100644 index 000000000..89f6627dc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -0,0 +1,594 @@ +package endpoints + +import ( + "encoding/json" + "fmt" + "regexp" + "strconv" + "strings" +) + +const ( + ec2MetadataEndpointIPv6 = "http://[fd00:ec2::254]/latest" + ec2MetadataEndpointIPv4 = "http://169.254.169.254/latest" +) + +const dnsSuffixTemplateKey = "{dnsSuffix}" + +// defaultKey is a compound map key of a variant and other values. +type defaultKey struct { + Variant endpointVariant + ServiceVariant serviceVariant +} + +// endpointKey is a compound map key of a region and associated variant value. +type endpointKey struct { + Region string + Variant endpointVariant +} + +// endpointVariant is a bit field to describe the endpoints attributes. +type endpointVariant uint64 + +// serviceVariant is a bit field to describe the service endpoint attributes. +type serviceVariant uint64 + +const ( + // fipsVariant indicates that the endpoint is FIPS capable. 
+ fipsVariant endpointVariant = 1 << (64 - 1 - iota) + + // dualStackVariant indicates that the endpoint is DualStack capable. + dualStackVariant +) + +var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`) + +type partitions []partition + +func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + var opt Options + opt.Set(opts...) + + if len(opt.ResolvedRegion) > 0 { + region = opt.ResolvedRegion + } + + for i := 0; i < len(ps); i++ { + if !ps[i].canResolveEndpoint(service, region, opt) { + continue + } + + return ps[i].EndpointFor(service, region, opts...) + } + + // If loose matching fallback to first partition format to use + // when resolving the endpoint. + if !opt.StrictMatching && len(ps) > 0 { + return ps[0].EndpointFor(service, region, opts...) + } + + return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) +} + +// Partitions satisfies the EnumPartitions interface and returns a list +// of Partitions representing each partition represented in the SDK's +// endpoints model. +func (ps partitions) Partitions() []Partition { + parts := make([]Partition, 0, len(ps)) + for i := 0; i < len(ps); i++ { + parts = append(parts, ps[i].Partition()) + } + + return parts +} + +type endpointWithVariants struct { + endpoint + Variants []endpointWithTags `json:"variants"` +} + +type endpointWithTags struct { + endpoint + Tags []string `json:"tags"` +} + +type endpointDefaults map[defaultKey]endpoint + +func (p *endpointDefaults) UnmarshalJSON(data []byte) error { + if *p == nil { + *p = make(endpointDefaults) + } + + var e endpointWithVariants + if err := json.Unmarshal(data, &e); err != nil { + return err + } + + (*p)[defaultKey{Variant: 0}] = e.endpoint + + e.Hostname = "" + e.DNSSuffix = "" + + for _, variant := range e.Variants { + endpointVariant, unknown := parseVariantTags(variant.Tags) + if unknown { + continue + } + + var ve endpoint + ve.mergeIn(e.endpoint) + ve.mergeIn(variant.endpoint) + + (*p)[defaultKey{Variant: endpointVariant}] = ve + } + + return nil +} + +func parseVariantTags(tags []string) (ev endpointVariant, unknown bool) { + if len(tags) == 0 { + unknown = true + return + } + + for _, tag := range tags { + switch { + case strings.EqualFold("fips", tag): + ev |= fipsVariant + case strings.EqualFold("dualstack", tag): + ev |= dualStackVariant + default: + unknown = true + } + } + return ev, unknown +} + +type partition struct { + ID string `json:"partition"` + Name string `json:"partitionName"` + DNSSuffix string `json:"dnsSuffix"` + RegionRegex regionRegex `json:"regionRegex"` + Defaults endpointDefaults `json:"defaults"` + Regions regions `json:"regions"` + Services services `json:"services"` +} + +func (p partition) Partition() Partition { + return Partition{ + dnsSuffix: p.DNSSuffix, + id: p.ID, + p: &p, + } +} + +func (p partition) canResolveEndpoint(service, region string, options Options) bool { + s, hasService := p.Services[service] + _, hasEndpoint := s.Endpoints[endpointKey{ + Region: region, + Variant: options.getEndpointVariant(service), + }] + + if hasEndpoint && hasService { + return true + } + + if options.StrictMatching { + return false + } + + return p.RegionRegex.MatchString(region) +} + +func allowLegacyEmptyRegion(service string) bool { + legacy := map[string]struct{}{ + "budgets": {}, + "ce": {}, + "chime": {}, + "cloudfront": {}, + "ec2metadata": {}, + "iam": {}, + "importexport": {}, + "organizations": {}, + "route53": {}, 
+ "sts": {}, + "support": {}, + "waf": {}, + } + + _, allowed := legacy[service] + return allowed +} + +func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { + var opt Options + opt.Set(opts...) + + if len(opt.ResolvedRegion) > 0 { + region = opt.ResolvedRegion + } + + s, hasService := p.Services[service] + + if service == Ec2metadataServiceID && !hasService { + endpoint := getEC2MetadataEndpoint(p.ID, service, opt.EC2MetadataEndpointMode) + return endpoint, nil + } + + if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { + // Only return error if the resolver will not fallback to creating + // endpoint based on service endpoint ID passed in. + return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) + } + + if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { + region = s.PartitionEndpoint + } + + if r, ok := isLegacyGlobalRegion(service, region, opt); ok { + region = r + } + + variant := opt.getEndpointVariant(service) + + endpoints := s.Endpoints + + serviceDefaults, hasServiceDefault := s.Defaults[defaultKey{Variant: variant}] + // If we searched for a variant which may have no explicit service defaults, + // then we need to inherit the standard service defaults except the hostname and dnsSuffix + if variant != 0 && !hasServiceDefault { + serviceDefaults = s.Defaults[defaultKey{}] + serviceDefaults.Hostname = "" + serviceDefaults.DNSSuffix = "" + } + + partitionDefaults, hasPartitionDefault := p.Defaults[defaultKey{Variant: variant}] + + var dnsSuffix string + if len(serviceDefaults.DNSSuffix) > 0 { + dnsSuffix = serviceDefaults.DNSSuffix + } else if variant == 0 { + // For legacy reasons the partition dnsSuffix is not in the defaults, so if we looked for + // a non-variant endpoint then we need to set the dnsSuffix. 
+ dnsSuffix = p.DNSSuffix + } + + noDefaults := !hasServiceDefault && !hasPartitionDefault + + e, hasEndpoint := s.endpointForRegion(region, endpoints, variant) + if len(region) == 0 || (!hasEndpoint && (opt.StrictMatching || noDefaults)) { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(endpoints, variant)) + } + + defs := []endpoint{partitionDefaults, serviceDefaults} + + return e.resolve(service, p.ID, region, dnsSuffixTemplateKey, dnsSuffix, defs, opt) +} + +func getEC2MetadataEndpoint(partitionID, service string, mode EC2IMDSEndpointModeState) ResolvedEndpoint { + switch mode { + case EC2IMDSEndpointModeStateIPv6: + return ResolvedEndpoint{ + URL: ec2MetadataEndpointIPv6, + PartitionID: partitionID, + SigningRegion: "aws-global", + SigningName: service, + SigningNameDerived: true, + SigningMethod: "v4", + } + case EC2IMDSEndpointModeStateIPv4: + fallthrough + default: + return ResolvedEndpoint{ + URL: ec2MetadataEndpointIPv4, + PartitionID: partitionID, + SigningRegion: "aws-global", + SigningName: service, + SigningNameDerived: true, + SigningMethod: "v4", + } + } +} + +func isLegacyGlobalRegion(service string, region string, opt Options) (string, bool) { + if opt.getEndpointVariant(service) != 0 { + return "", false + } + + const ( + sts = "sts" + s3 = "s3" + awsGlobal = "aws-global" + ) + + switch { + case service == sts && opt.STSRegionalEndpoint == RegionalSTSEndpoint: + return region, false + case service == s3 && opt.S3UsEast1RegionalEndpoint == RegionalS3UsEast1Endpoint: + return region, false + default: + if _, ok := legacyGlobalRegions[service][region]; ok { + return awsGlobal, true + } + } + + return region, false +} + +func serviceList(ss services) []string { + list := make([]string, 0, len(ss)) + for k := range ss { + list = append(list, k) + } + return list +} +func endpointList(es serviceEndpoints, variant endpointVariant) []string { + list := make([]string, 0, len(es)) + for k := range es { + if k.Variant != variant { + continue + } + list = append(list, k.Region) + } + return list +} + +type regionRegex struct { + *regexp.Regexp +} + +func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { + // Strip leading and trailing quotes + regex, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("unable to strip quotes from regex, %v", err) + } + + rr.Regexp, err = regexp.Compile(regex) + if err != nil { + return fmt.Errorf("unable to unmarshal region regex, %v", err) + } + return nil +} + +type regions map[string]region + +type region struct { + Description string `json:"description"` +} + +type services map[string]service + +type service struct { + PartitionEndpoint string `json:"partitionEndpoint"` + IsRegionalized boxedBool `json:"isRegionalized,omitempty"` + Defaults endpointDefaults `json:"defaults"` + Endpoints serviceEndpoints `json:"endpoints"` +} + +func (s *service) endpointForRegion(region string, endpoints serviceEndpoints, variant endpointVariant) (endpoint, bool) { + if e, ok := endpoints[endpointKey{Region: region, Variant: variant}]; ok { + return e, true + } + + if s.IsRegionalized == boxedFalse { + return endpoints[endpointKey{Region: s.PartitionEndpoint, Variant: variant}], region == s.PartitionEndpoint + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. 
+ return endpoint{}, false +} + +type serviceEndpoints map[endpointKey]endpoint + +func (s *serviceEndpoints) UnmarshalJSON(data []byte) error { + if *s == nil { + *s = make(serviceEndpoints) + } + + var regionToEndpoint map[string]endpointWithVariants + + if err := json.Unmarshal(data, ®ionToEndpoint); err != nil { + return err + } + + for region, e := range regionToEndpoint { + (*s)[endpointKey{Region: region}] = e.endpoint + + e.Hostname = "" + e.DNSSuffix = "" + + for _, variant := range e.Variants { + endpointVariant, unknown := parseVariantTags(variant.Tags) + if unknown { + continue + } + + var ve endpoint + ve.mergeIn(e.endpoint) + ve.mergeIn(variant.endpoint) + + (*s)[endpointKey{Region: region, Variant: endpointVariant}] = ve + } + } + + return nil +} + +type endpoint struct { + Hostname string `json:"hostname"` + Protocols []string `json:"protocols"` + CredentialScope credentialScope `json:"credentialScope"` + + DNSSuffix string `json:"dnsSuffix"` + + // Signature Version not used + SignatureVersions []string `json:"signatureVersions"` + + // SSLCommonName not used. + SSLCommonName string `json:"sslCommonName"` + + Deprecated boxedBool `json:"deprecated"` +} + +// isZero returns whether the endpoint structure is an empty (zero) value. +func (e endpoint) isZero() bool { + switch { + case len(e.Hostname) != 0: + return false + case len(e.Protocols) != 0: + return false + case e.CredentialScope != (credentialScope{}): + return false + case len(e.SignatureVersions) != 0: + return false + case len(e.SSLCommonName) != 0: + return false + } + return true +} + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "v2"} +) + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +func (e endpoint) resolve(service, partitionID, region, dnsSuffixTemplateVariable, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { + var merged endpoint + for _, def := range defs { + merged.mergeIn(def) + } + merged.mergeIn(e) + e = merged + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + + signingName := e.CredentialScope.Service + var signingNameDerived bool + if len(signingName) == 0 { + signingName = service + signingNameDerived = true + } + + hostname := e.Hostname + + if !validateInputRegion(region) { + return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided") + } + + if len(merged.DNSSuffix) > 0 { + dnsSuffix = merged.DNSSuffix + } + + u := strings.Replace(hostname, "{service}", service, 1) + u = strings.Replace(u, "{region}", region, 1) + u = strings.Replace(u, dnsSuffixTemplateVariable, dnsSuffix, 1) + + scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) + u = fmt.Sprintf("%s://%s", scheme, u) + + if e.Deprecated == boxedTrue && opts.LogDeprecated && opts.Logger != nil { + opts.Logger.Log(fmt.Sprintf("endpoint identifier %q, url %q marked as deprecated", region, u)) + } + + return ResolvedEndpoint{ + URL: u, + PartitionID: partitionID, + SigningRegion: signingRegion, + SigningName: signingName, + SigningNameDerived: signingNameDerived, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + }, nil +} + +func getEndpointScheme(protocols []string, disableSSL bool) string { + if 
disableSSL { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func (e *endpoint) mergeIn(other endpoint) { + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SSLCommonName) > 0 { + e.SSLCommonName = other.SSLCommonName + } + if len(other.DNSSuffix) > 0 { + e.DNSSuffix = other.DNSSuffix + } + if other.Deprecated != boxedBoolUnset { + e.Deprecated = other.Deprecated + } +} + +type credentialScope struct { + Region string `json:"region"` + Service string `json:"service"` +} + +type boxedBool int + +func (b *boxedBool) UnmarshalJSON(buf []byte) error { + v, err := strconv.ParseBool(string(buf)) + if err != nil { + return err + } + + if v { + *b = boxedTrue + } else { + *b = boxedFalse + } + + return nil +} + +const ( + boxedBoolUnset boxedBool = iota + boxedFalse + boxedTrue +) + +func validateInputRegion(region string) bool { + return regionValidationRegex.MatchString(region) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go new file mode 100644 index 000000000..84922bca8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -0,0 +1,412 @@ +//go:build codegen +// +build codegen + +package endpoints + +import ( + "fmt" + "io" + "reflect" + "strings" + "text/template" + "unicode" +) + +// A CodeGenOptions are the options for code generating the endpoints into +// Go code from the endpoints model definition. +type CodeGenOptions struct { + // Options for how the model will be decoded. + DecodeModelOptions DecodeModelOptions + + // Disables code generation of the service endpoint prefix IDs defined in + // the model. + DisableGenerateServiceIDs bool +} + +// Set combines all of the option functions together +func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// CodeGenModel given a endpoints model file will decode it and attempt to +// generate Go code from the model definition. Error will be returned if +// the code is unable to be generated, or decoded. +func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { + var opts CodeGenOptions + opts.Set(optFns...) 
+ + resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { + *d = opts.DecodeModelOptions + }) + if err != nil { + return err + } + + v := struct { + Resolver + CodeGenOptions + }{ + Resolver: resolver, + CodeGenOptions: opts, + } + + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) + if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { + return fmt.Errorf("failed to execute template, %v", err) + } + + return nil +} + +func toSymbol(v string) string { + out := []rune{} + for _, c := range strings.Title(v) { + if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { + continue + } + + out = append(out, c) + } + + return string(out) +} + +func quoteString(v string) string { + return fmt.Sprintf("%q", v) +} + +func regionConstName(p, r string) string { + return toSymbol(p) + toSymbol(r) +} + +func partitionGetter(id string) string { + return fmt.Sprintf("%sPartition", toSymbol(id)) +} + +func partitionVarName(id string) string { + return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) +} + +func listPartitionNames(ps partitions) string { + names := []string{} + switch len(ps) { + case 1: + return ps[0].Name + case 2: + return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) + default: + for i, p := range ps { + if i == len(ps)-1 { + names = append(names, "and "+p.Name) + } else { + names = append(names, p.Name) + } + } + return strings.Join(names, ", ") + } +} + +func boxedBoolIfSet(msg string, v boxedBool) string { + switch v { + case boxedTrue: + return fmt.Sprintf(msg, "boxedTrue") + case boxedFalse: + return fmt.Sprintf(msg, "boxedFalse") + default: + return "" + } +} + +func stringIfSet(msg, v string) string { + if len(v) == 0 { + return "" + } + + return fmt.Sprintf(msg, v) +} + +func stringSliceIfSet(msg string, vs []string) string { + if len(vs) == 0 { + return "" + } + + names := []string{} + for _, v := range vs { + names = append(names, `"`+v+`"`) + } + + return fmt.Sprintf(msg, strings.Join(names, ",")) +} + +func endpointIsSet(v endpoint) bool { + return !reflect.DeepEqual(v, endpoint{}) +} + +func serviceSet(ps partitions) map[string]struct{} { + set := map[string]struct{}{} + for _, p := range ps { + for id := range p.Services { + set[id] = struct{}{} + } + } + + return set +} + +func endpointVariantSetter(variant endpointVariant) (string, error) { + if variant == 0 { + return "0", nil + } + + if variant > (fipsVariant | dualStackVariant) { + return "", fmt.Errorf("unknown endpoint variant") + } + + var symbols []string + if variant&fipsVariant != 0 { + symbols = append(symbols, "fipsVariant") + } + if variant&dualStackVariant != 0 { + symbols = append(symbols, "dualStackVariant") + } + v := strings.Join(symbols, "|") + + return v, nil +} + +func endpointKeySetter(e endpointKey) (string, error) { + var sb strings.Builder + sb.WriteString("endpointKey{\n") + sb.WriteString(fmt.Sprintf("Region: %q,\n", e.Region)) + if e.Variant != 0 { + variantSetter, err := endpointVariantSetter(e.Variant) + if err != nil { + return "", err + } + sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter)) + } + sb.WriteString("}") + return sb.String(), nil +} + +func defaultKeySetter(e defaultKey) (string, error) { + var sb strings.Builder + sb.WriteString("defaultKey{\n") + if e.Variant != 0 { + variantSetter, err := endpointVariantSetter(e.Variant) + if err != nil { + return "", err + } + sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter)) + } + sb.WriteString("}") + return sb.String(), nil +} + +var funcMap = 
template.FuncMap{ + "ToSymbol": toSymbol, + "QuoteString": quoteString, + "RegionConst": regionConstName, + "PartitionGetter": partitionGetter, + "PartitionVarName": partitionVarName, + "ListPartitionNames": listPartitionNames, + "BoxedBoolIfSet": boxedBoolIfSet, + "StringIfSet": stringIfSet, + "StringSliceIfSet": stringSliceIfSet, + "EndpointIsSet": endpointIsSet, + "ServicesSet": serviceSet, + "EndpointVariantSetter": endpointVariantSetter, + "EndpointKeySetter": endpointKeySetter, + "DefaultKeySetter": defaultKeySetter, +} + +const v3Tmpl = ` +{{ define "defaults" -}} +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + + {{ template "partition consts" $.Resolver }} + + {{ range $_, $partition := $.Resolver }} + {{ template "partition region consts" $partition }} + {{ end }} + + {{ if not $.DisableGenerateServiceIDs -}} + {{ template "service consts" $.Resolver }} + {{- end }} + + {{ template "endpoint resolvers" $.Resolver }} +{{- end }} + +{{ define "partition consts" }} + // Partition identifiers + const ( + {{ range $_, $p := . -}} + {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. + {{ end -}} + ) +{{- end }} + +{{ define "partition region consts" }} + // {{ .Name }} partition's regions. + const ( + {{ range $id, $region := .Regions -}} + {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. + {{ end -}} + ) +{{- end }} + +{{ define "service consts" }} + // Service identifiers + const ( + {{ $serviceSet := ServicesSet . -}} + {{ range $id, $_ := $serviceSet -}} + {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. + {{ end -}} + ) +{{- end }} + +{{ define "endpoint resolvers" }} + // DefaultResolver returns an Endpoint resolver that will be able + // to resolve endpoints for: {{ ListPartitionNames . }}. + // + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. The available partitions are: {{ ListPartitionNames . }}. + // + // partitions := endpoints.DefaultPartitions + // for _, p := range partitions { + // // ... inspect partitions + // } + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() + } + + var defaultPartitions = partitions{ + {{ range $_, $partition := . -}} + {{ PartitionVarName $partition.ID }}, + {{ end }} + } + + {{ range $_, $partition := . -}} + {{ $name := PartitionGetter $partition.ID -}} + // {{ $name }} returns the Resolver for {{ $partition.Name }}. + func {{ $name }}() Partition { + return {{ PartitionVarName $partition.ID }}.Partition() + } + var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} + {{ end }} +{{ end }} + +{{ define "default partitions" }} + func DefaultPartitions() []Partition { + return []partition{ + {{ range $_, $partition := . 
-}} + // {{ ToSymbol $partition.ID}}Partition(), + {{ end }} + } + } +{{ end }} + +{{ define "gocode Partition" -}} +partition{ + {{ StringIfSet "ID: %q,\n" .ID -}} + {{ StringIfSet "Name: %q,\n" .Name -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, + {{ if (gt (len .Defaults) 0) -}} + Defaults: {{ template "gocode Defaults" .Defaults -}}, + {{ end -}} + Regions: {{ template "gocode Regions" .Regions }}, + Services: {{ template "gocode Services" .Services }}, +} +{{- end }} + +{{ define "gocode RegionRegex" -}} +regionRegex{ + Regexp: func() *regexp.Regexp{ + reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) + return reg + }(), +} +{{- end }} + +{{ define "gocode Regions" -}} +regions{ + {{ range $id, $region := . -}} + "{{ $id }}": {{ template "gocode Region" $region }}, + {{ end -}} +} +{{- end }} + +{{ define "gocode Region" -}} +region{ + {{ StringIfSet "Description: %q,\n" .Description -}} +} +{{- end }} + +{{ define "gocode Services" -}} +services{ + {{ range $id, $service := . -}} + "{{ $id }}": {{ template "gocode Service" $service }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Service" -}} +service{ + {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} + {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} + {{ if (gt (len .Defaults) 0) -}} + Defaults: {{ template "gocode Defaults" .Defaults -}}, + {{ end -}} + {{ if .Endpoints -}} + Endpoints: {{ template "gocode Endpoints" .Endpoints }}, + {{- end }} +} +{{- end }} + +{{ define "gocode Defaults" -}} +endpointDefaults{ + {{ range $id, $endpoint := . -}} + {{ DefaultKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoints" -}} +serviceEndpoints{ + {{ range $id, $endpoint := . -}} + {{ EndpointKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoint" -}} +endpoint{ + {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} + {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} + {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} + {{ if or .CredentialScope.Region .CredentialScope.Service -}} + CredentialScope: credentialScope{ + {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} + {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} + }, + {{- end }} + {{ BoxedBoolIfSet "Deprecated: %s,\n" .Deprecated -}} +} +{{- end }} +` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 000000000..fa06f7a8f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,13 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. 
+	ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
new file mode 100644
index 000000000..91a6f277a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
@@ -0,0 +1,12 @@
+package aws
+
+// JSONValue is a representation of a grab bag type that will be marshaled
+// into a JSON string. This type can be used just like any other map.
+//
+// Example:
+//
+//	values := aws.JSONValue{
+//		"Foo": "Bar",
+//	}
+//	values["Baz"] = "Qux"
+type JSONValue map[string]interface{}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100644
index 000000000..49674cc79
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
@@ -0,0 +1,121 @@
+package aws
+
+import (
+	"log"
+	"os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns the pointer to a LogLevel. Should be used to work around
+// not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+	return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+	if l != nil {
+		return *l
+	}
+	return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub-levels. Is safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, will default to LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+	c := l.Value()
+	return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
+// to LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+	c := l.Value()
+	return c >= v
+}
+
+const (
+	// LogOff states that no logging should be performed by the SDK. This is the
+	// default state of the SDK, and should be used to disable all logging.
+	LogOff LogLevelType = iota * 0x1000
+
+	// LogDebug states that debug output should be logged by the SDK. This should
+	// be used to inspect requests made and responses received.
+	LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+	// LogDebugWithSigning states that the SDK should log request signing and
+	// presigning events. This should be used to log the signing details of
+	// requests for debugging. Will also enable LogDebug.
+	LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
+	// HTTP bodies in addition to the headers and path. This should be used to
+	// see the body content of requests and responses made while using the SDK.
+	// Will also enable LogDebug.
+	LogDebugWithHTTPBody
+
+	// LogDebugWithRequestRetries states the SDK should log when service requests will
+	// be retried. This should be used when you want to log that service
+	// requests are being retried. Will also enable LogDebug.
+	LogDebugWithRequestRetries
+
+	// LogDebugWithRequestErrors states the SDK should log when service requests fail
+	// to build, send, validate, or unmarshal.
+	LogDebugWithRequestErrors
+
+	// LogDebugWithEventStreamBody states the SDK should log EventStream
+	// request and response bodies. This should be used to log the EventStream
+	// wire unmarshaled message content of requests and responses made while
+	// using the SDK. Will also enable LogDebug.
+	LogDebugWithEventStreamBody
+
+	// LogDebugWithDeprecated states the SDK should log details about deprecated functionality.
+	LogDebugWithDeprecated
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+	Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type that wraps a function taking a variadic
+// list of arguments so the Logger interface can be used.
+//
+// Example:
+//
+//	s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+//		fmt.Fprintln(os.Stdout, args...)
+//	})})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided.
+func (f LoggerFunc) Log(args ...interface{}) {
+	f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout,
+// and use the same formatting runes as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+	return &defaultLogger{
+		logger: log.New(os.Stdout, "", log.LstdFlags),
+	}
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+	logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+	l.logger.Println(args...)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
new file mode 100644
index 000000000..2ba3c56c1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
@@ -0,0 +1,19 @@
+package request
+
+import (
+	"strings"
+)
+
+func isErrConnectionReset(err error) bool {
+	if strings.Contains(err.Error(), "read: connection reset") {
+		return false
+	}
+
+	if strings.Contains(err.Error(), "use of closed network connection") ||
+		strings.Contains(err.Error(), "connection reset") ||
+		strings.Contains(err.Error(), "broken pipe") {
+		return true
+	}
+
+	return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644
index 000000000..9556332b6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,346 @@
+package request
+
+import (
+	"fmt"
+	"strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+	Validate         HandlerList
+	Build            HandlerList
+	BuildStream      HandlerList
+	Sign             HandlerList
+	Send             HandlerList
+	ValidateResponse HandlerList
+	Unmarshal        HandlerList
+	UnmarshalStream  HandlerList
+	UnmarshalMeta    HandlerList
+	UnmarshalError   HandlerList
+	Retry            HandlerList
+	AfterRetry       HandlerList
+	CompleteAttempt  HandlerList
+	Complete         HandlerList
+}
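Tying the logger pieces above together, a minimal sketch, assuming the vendored aws package is importable at its upstream path:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Route SDK log output through any function with the variadic shape.
	logger := aws.LoggerFunc(func(args ...interface{}) {
		fmt.Fprintln(os.Stderr, args...)
	})
	logger.Log("retrying request", 2, "of", 3)

	// Sub-levels are bit flags, so enabling one also enables LogDebug.
	level := aws.LogDebugWithHTTPBody
	fmt.Println(level.Matches(aws.LogDebug)) // true
}

+// Copy returns a copy of this handler's lists.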
+func (h *Handlers) Copy() Handlers {
+	return Handlers{
+		Validate:         h.Validate.copy(),
+		Build:            h.Build.copy(),
+		BuildStream:      h.BuildStream.copy(),
+		Sign:             h.Sign.copy(),
+		Send:             h.Send.copy(),
+		ValidateResponse: h.ValidateResponse.copy(),
+		Unmarshal:        h.Unmarshal.copy(),
+		UnmarshalStream:  h.UnmarshalStream.copy(),
+		UnmarshalError:   h.UnmarshalError.copy(),
+		UnmarshalMeta:    h.UnmarshalMeta.copy(),
+		Retry:            h.Retry.copy(),
+		AfterRetry:       h.AfterRetry.copy(),
+		CompleteAttempt:  h.CompleteAttempt.copy(),
+		Complete:         h.Complete.copy(),
+	}
+}
+
+// Clear removes callback functions for all handlers.
+func (h *Handlers) Clear() {
+	h.Validate.Clear()
+	h.Build.Clear()
+	h.BuildStream.Clear()
+	h.Send.Clear()
+	h.Sign.Clear()
+	h.Unmarshal.Clear()
+	h.UnmarshalStream.Clear()
+	h.UnmarshalMeta.Clear()
+	h.UnmarshalError.Clear()
+	h.ValidateResponse.Clear()
+	h.Retry.Clear()
+	h.AfterRetry.Clear()
+	h.CompleteAttempt.Clear()
+	h.Complete.Clear()
+}
+
+// IsEmpty returns if there are no handlers in any of the handler lists.
+func (h *Handlers) IsEmpty() bool {
+	if h.Validate.Len() != 0 {
+		return false
+	}
+	if h.Build.Len() != 0 {
+		return false
+	}
+	if h.BuildStream.Len() != 0 {
+		return false
+	}
+	if h.Send.Len() != 0 {
+		return false
+	}
+	if h.Sign.Len() != 0 {
+		return false
+	}
+	if h.Unmarshal.Len() != 0 {
+		return false
+	}
+	if h.UnmarshalStream.Len() != 0 {
+		return false
+	}
+	if h.UnmarshalMeta.Len() != 0 {
+		return false
+	}
+	if h.UnmarshalError.Len() != 0 {
+		return false
+	}
+	if h.ValidateResponse.Len() != 0 {
+		return false
+	}
+	if h.Retry.Len() != 0 {
+		return false
+	}
+	if h.AfterRetry.Len() != 0 {
+		return false
+	}
+	if h.CompleteAttempt.Len() != 0 {
+		return false
+	}
+	if h.Complete.Len() != 0 {
+		return false
+	}
+
+	return true
+}
+
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+	Index   int
+	Handler NamedHandler
+	Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+	list []NamedHandler
+
+	// Called after each request handler in the list is called. If set
+	// and the func returns true the HandlerList will continue to iterate
+	// over the request handlers. If false is returned the HandlerList
+	// will stop iterating.
+	//
+	// Should be used if extra logic is to be performed between each handler
+	// in the list. This can be used to terminate a list's iteration
+	// based on a condition, such as stopping on error (see
+	// HandlerListStopOnError), or for logging (see HandlerListLogItem).
+	AfterEachFn func(item HandlerListRunItem) bool
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+	Name string
+	Fn   func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+	n := HandlerList{
+		AfterEachFn: l.AfterEachFn,
+	}
+	if len(l.list) == 0 {
+		return n
+	}
+
+	n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
+	return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+	l.list = l.list[0:0]
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+	return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+	l.PushBackNamed(NamedHandler{"__anonymous", f})
+}
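A sketch of how a HandlerList is typically populated and run; PushFrontNamed, Run, and HandlerListStopOnError are defined just below, and the handler names are illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	var l request.HandlerList

	l.PushFrontNamed(request.NamedHandler{
		Name: "example.first",
		Fn:   func(r *request.Request) { fmt.Println("first") },
	})
	l.PushBack(func(r *request.Request) { fmt.Println("second") })

	// Stop iterating as soon as a handler records an error on the request.
	l.AfterEachFn = request.HandlerListStopOnError

	// Neither handler touches the request, so a zero value suffices here.
	l.Run(&request.Request{})
}

+// PushBackNamed pushes named handler f to the back of the handler list.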
+func (l *HandlerList) PushBackNamed(n NamedHandler) { + if cap(l.list) == 0 { + l.list = make([]NamedHandler, 0, 5) + } + l.list = append(l.list, n) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.PushFrontNamed(NamedHandler{"__anonymous", f}) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + if cap(l.list) == len(l.list) { + // Allocating new list required + l.list = append([]NamedHandler{n}, l.list...) + } else { + // Enough room to prepend into list. + l.list = append(l.list, NamedHandler{}) + copy(l.list[1:], l.list) + l.list[0] = n + } +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + l.RemoveByName(n.Name) +} + +// RemoveByName removes a NamedHandler by name. +func (l *HandlerList) RemoveByName(name string) { + for i := 0; i < len(l.list); i++ { + m := l.list[i] + if m.Name == name { + // Shift array preventing creating new arrays + copy(l.list[i:], l.list[i+1:]) + l.list[len(l.list)-1] = NamedHandler{} + l.list = l.list[:len(l.list)-1] + + // decrement list so next check to length is correct + i-- + } + } +} + +// SwapNamed will swap out any existing handlers with the same name as the +// passed in NamedHandler returning true if handlers were swapped. False is +// returned otherwise. +func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == n.Name { + l.list[i].Fn = n.Fn + swapped = true + } + } + + return swapped +} + +// Swap will swap out all handlers matching the name passed in. The matched +// handlers will be swapped in. True is returned if the handlers were swapped. +func (l *HandlerList) Swap(name string, replace NamedHandler) bool { + var swapped bool + + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == name { + l.list[i] = replace + swapped = true + } + } + + return swapped +} + +// SetBackNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the end of the list. +func (l *HandlerList) SetBackNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushBackNamed(n) + } +} + +// SetFrontNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the beginning of +// the list. +func (l *HandlerList) SetFrontNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushFrontNamed(n) + } +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. +func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. 
+func HandlerListStopOnError(item HandlerListRunItem) bool {
+	return item.Request.Error == nil
+}
+
+// WithAppendUserAgent will add a string to the user agent prefixed with a
+// single white space.
+func WithAppendUserAgent(s string) Option {
+	return func(r *Request) {
+		r.Handlers.Build.PushBack(func(r2 *Request) {
+			AddToUserAgent(r, s)
+		})
+	}
+}
+
+// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
+// header. If the extra parameters are provided they will be added as metadata to the
+// name/version pair resulting in the following format:
+// "name/version (extra0; extra1; ...)"
+// The user agent part will be concatenated with this current request's user agent string.
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
+	ua := fmt.Sprintf("%s/%s", name, version)
+	if len(extra) > 0 {
+		ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
+	}
+	return func(r *Request) {
+		AddToUserAgent(r, ua)
+	}
+}
+
+// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
+// The input string will be concatenated with the current request's user agent string.
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
+	return func(r *Request) {
+		AddToUserAgent(r, s)
+	}
+}
+
+// WithSetRequestHeaders updates the operation request's HTTP header to contain
+// the header key value pairs provided. If the header key already exists in the
+// request's HTTP header set, the existing value(s) will be replaced.
+//
+// Header keys are added in canonical format, with title casing applied via
+// the http.Header.Set method.
+func WithSetRequestHeaders(h map[string]string) Option {
+	return withRequestHeader(h).SetRequestHeaders
+}
+
+type withRequestHeader map[string]string
+
+func (h withRequestHeader) SetRequestHeaders(r *Request) {
+	for k, v := range h {
+		r.HTTPRequest.Header.Set(k, v)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
new file mode 100644
index 000000000..79f79602b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
@@ -0,0 +1,24 @@
+package request
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+)
+
+func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
+	req := new(http.Request)
+	*req = *r
+	req.URL = &url.URL{}
+	*req.URL = *r.URL
+	req.Body = body
+
+	req.Header = http.Header{}
+	for k, v := range r.Header {
+		for _, vv := range v {
+			req.Header.Add(k, vv)
+		}
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
new file mode 100644
index 000000000..9370fa50c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
@@ -0,0 +1,65 @@
+package request
+
+import (
+	"io"
+	"sync"
+
+	"github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// offsetReader is a thread-safe io.ReadCloser used to prevent racing
+// with retrying requests.
+type offsetReader struct {
+	buf    io.ReadSeeker
+	lock   sync.Mutex
+	closed bool
+}
+
+func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) {
+	reader := &offsetReader{}
+	_, err := buf.Seek(offset, sdkio.SeekStart)
+	if err != nil {
+		return nil, err
+	}
+
+	reader.buf = buf
+	return reader, nil
+}
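Because WithSetRequestHeaders mutates the request's HTTP header directly, it can be sketched against a bare Request; the header key and value below are illustrative:

package main

import (
	"fmt"
	"net/http"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	r := &request.Request{HTTPRequest: &http.Request{Header: http.Header{}}}

	// http.Header.Set canonicalizes the key ("x-custom" becomes "X-Custom").
	opt := request.WithSetRequestHeaders(map[string]string{"x-custom": "demo"})
	opt(r)

	fmt.Println(r.HTTPRequest.Header.Get("X-Custom")) // demo
}

+// Close will close the instance of the offset reader's access to
+// the underlying io.ReadSeeker.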
+func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read of the underlying io.ReadSeeker +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.Lock() + defer o.lock.Unlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// Seek is a thread-safe seeking operation. +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { + o.lock.Lock() + defer o.lock.Unlock() + + return o.buf.Seek(offset, whence) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. +func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { + if err := o.Close(); err != nil { + return nil, err + } + return newOffsetReader(o.buf, offset) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 000000000..636d9ec94 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,722 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. + ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. + ErrCodeRead = "ReadError" + + // ErrCodeResponseTimeout is the connection timeout error that is received + // during body reads. + ErrCodeResponseTimeout = "ResponseTimeout" + + // ErrCodeInvalidPresignExpire is returned when the expire time provided to + // presign is invalid + ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" + + // CanceledErrorCode is the error code that will be returned by an + // API request that was canceled. Requests given a aws.Context may + // return this error when canceled. + CanceledErrorCode = "RequestCanceled" + + // ErrCodeRequestError is an error preventing the SDK from continuing to + // process the request. + ErrCodeRequestError = "RequestError" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + AttemptTime time.Time + Time time.Time + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + streamingBody io.ReadCloser + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + DisableFollowRedirects bool + + // Additional API error codes that should be retried. IsErrorRetryable + // will consider these codes in addition to its built in cases. + RetryErrorCodes []string + + // Additional API error codes that should be retried with throttle backoff + // delay. IsErrorThrottle will consider these codes in addition to its + // built in cases. + ThrottleErrorCodes []string + + // A value greater than 0 instructs the request to be signed as Presigned URL + // You should not set this field directly. Instead use Request's + // Presign or PresignRequest methods. 
+ ExpireTime time.Duration + + context aws.Context + + built bool + + // Need to persist an intermediate body between the input Body and HTTP + // request body because the HTTP Client's transport can maintain a reference + // to the HTTP request's body after the client has returned. This value is + // safe to use concurrently and wrap the input Body for each HTTP request. + safeBody *offsetReader +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator + + BeforePresignFn func(r *Request) error +} + +// New returns a new Request pointer for the service API operation and +// parameters. +// +// A Retryer should be provided to direct how the request is retried. If +// Retryer is nil, a default no retry value will be used. You can use +// NoOpRetryer in the Client package to disable retry behavior directly. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. +func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, + retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + + if retryer == nil { + retryer = noOpRetryer{} + } + + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + + httpReq, _ := http.NewRequest(method, "", nil) + + var err error + httpReq.URL, err = url.Parse(clientInfo.Endpoint) + if err != nil { + httpReq.URL = &url.URL{} + err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) + } + + if len(operation.HTTPPath) != 0 { + opHTTPPath := operation.HTTPPath + var opQueryString string + if idx := strings.Index(opHTTPPath, "?"); idx >= 0 { + opQueryString = opHTTPPath[idx+1:] + opHTTPPath = opHTTPPath[:idx] + } + + if strings.HasSuffix(httpReq.URL.Path, "/") && strings.HasPrefix(opHTTPPath, "/") { + opHTTPPath = opHTTPPath[1:] + } + httpReq.URL.Path += opHTTPPath + httpReq.URL.RawQuery = opQueryString + } + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + Handlers: handlers.Copy(), + + Retryer: retryer, + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: err, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +// A Option is a functional option that can augment or modify a request when +// using a WithContext API operation method. +type Option func(*Request) + +// WithGetResponseHeader builds a request Option which will retrieve a single +// header value from the HTTP Response. If there are multiple values for the +// header key use WithGetResponseHeaders instead to access the http.Header +// map directly. The passed in val pointer must be non-nil. +// +// This Option can be used multiple times with a single API operation. +// +// var id2, versionID string +// svc.PutObjectWithContext(ctx, params, +// request.WithGetResponseHeader("x-amz-id-2", &id2), +// request.WithGetResponseHeader("x-amz-version-id", &versionID), +// ) +func WithGetResponseHeader(key string, val *string) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *val = req.HTTPResponse.Header.Get(key) + }) + } +} + +// WithGetResponseHeaders builds a request Option which will retrieve the +// headers from the HTTP response and assign them to the passed in headers +// variable. The passed in headers pointer must be non-nil. 
+//
+//	var headers http.Header
+//	svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*headers = req.HTTPResponse.Header
+		})
+	}
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+//	svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
+func WithLogLevel(l aws.LogLevelType) Option {
+	return func(r *Request) {
+		r.Config.LogLevel = aws.LogLevel(l)
+	}
+}
+
+// ApplyOptions will apply each option to the request, calling them in the
+// order they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+	for _, opt := range opts {
+		opt(r)
+	}
+}
+
+// Context will always return a non-nil context. If the Request does not have
+// a context, aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+	if r.context != nil {
+		return r.context
+	}
+	return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+	if ctx == nil {
+		panic("context cannot be nil")
+	}
+	setRequestContext(r, ctx)
+}
+
+// WillRetry returns whether the request can be retried.
+func (r *Request) WillRetry() bool {
+	if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody {
+		return false
+	}
+	return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+func fmtAttemptCount(retryCount, maxRetries int) string {
+	return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries)
+}
+
+// ParamsFilled returns whether the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or they are invalid.
+func (r *Request) ParamsFilled() bool {
+	return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+	return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+	r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+	r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
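The Option type above is the classic functional-options pattern: each option closes over its settings and mutates the request when applied, so options compose in the order given. A tiny self-contained sketch of the pattern; req, option, and withHeader are illustrative names, not the SDK's:

package main

import "fmt"

// req stands in for the SDK's Request; option mirrors func(*Request).
type req struct{ headers map[string]string }

type option func(*req)

// withHeader closes over a key/value pair and applies it later.
func withHeader(k, v string) option {
	return func(r *req) { r.headers[k] = v }
}

func (r *req) applyOptions(opts ...option) {
	for _, opt := range opts { // applied in the order they were provided
		opt(r)
	}
}

func main() {
	r := &req{headers: map[string]string{}}
	r.applyOptions(withHeader("X-Trace-Id", "abc123"), withHeader("X-Env", "dev"))
	fmt.Println(r.headers) // map[X-Env:dev X-Trace-Id:abc123]
}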
+func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.Body = reader + + if aws.IsReaderSeekable(reader) { + var err error + // Get the Bodies current offset so retries will start from the same + // initial position. + r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to determine start of request body", err) + return + } + } + r.ResetBody() +} + +// SetStreamingBody set the reader to be used for the request that will stream +// bytes to the server. Request's Body must not be set to any reader. +func (r *Request) SetStreamingBody(reader io.ReadCloser) { + r.streamingBody = reader + r.SetReaderBody(aws.ReadSeekCloser(reader)) +} + +// Presign returns the request's signed URL. Error will be returned +// if the signing fails. The expire parameter is only used for presigned Amazon +// S3 API requests. All other AWS services will use a fixed expiration +// time of 15 minutes. +// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +func (r *Request) Presign(expire time.Duration) (string, error) { + r = r.copy() + + // Presign requires all headers be hoisted. There is no way to retrieve + // the signed headers not hoisted without this. Making the presigned URL + // useless. + r.NotHoist = false + + u, _, err := getPresignedURL(r, expire) + return u, err +} + +// PresignRequest behaves just like presign, with the addition of returning a +// set of headers that were signed. The expire parameter is only used for +// presigned Amazon S3 API requests. All other AWS services will use a fixed +// expiration time of 15 minutes. +// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +// +// Returns the URL string for the API operation with signature in the query string, +// and the HTTP headers that were included in the signature. These headers must +// be included in any HTTP request made with the presigned URL. +// +// To prevent hoisting any headers to the query string set NotHoist to true on +// this Request value prior to calling PresignRequest. +func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { + r = r.copy() + return getPresignedURL(r, expire) +} + +// IsPresigned returns true if the request represents a presigned API url. +func (r *Request) IsPresigned() bool { + return r.ExpireTime != 0 +} + +func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { + if expire <= 0 { + return "", nil, awserr.New( + ErrCodeInvalidPresignExpire, + "presigned URL requires an expire duration greater than 0", + nil, + ) + } + + r.ExpireTime = expire + + if r.Operation.BeforePresignFn != nil { + if err := r.Operation.BeforePresignFn(r); err != nil { + return "", nil, err + } + } + + if err := r.Sign(); err != nil { + return "", nil, err + } + + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + +const ( + notRetrying = "not retrying" +) + +func debugLogReqError(r *Request, stage, retryStr string, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. 
+// Any additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. +func (r *Request) Build() error { + if !r.built { + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", notRetrying, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + r.built = true + } + + return r.Error +} + +// Sign will sign the request, returning error if errors are encountered. +// +// Sign will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + + SanitizeHostForHeader(r.HTTPRequest) + + r.Handlers.Sign.Run(r) + return r.Error +} + +func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { + if r.streamingBody != nil { + return r.streamingBody, nil + } + + if r.safeBody != nil { + r.safeBody.Close() + } + + r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to get next request body reader", err) + } + + // Go 1.8 tightened and clarified the rules code needs to use when building + // requests with the http package. Go 1.8 removed the automatic detection + // of if the Request.Body was empty, or actually had bytes in it. The SDK + // always sets the Request.Body even if it is empty and should not actually + // be sent. This is incorrect. + // + // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http + // client that the request really should be sent without a body. The + // Request.Body cannot be set to nil, which is preferable, because the + // field is exported and could introduce nil pointer dereferences for users + // of the SDK if they used that field. + // + // Related golang/go#18257 + l, err := aws.SeekerLen(r.Body) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to compute request body size", err) + } + + if l == 0 { + body = NoBody + } else if l > 0 { + body = r.safeBody + } else { + // Hack to prevent sending bodies for methods where the body + // should be ignored by the server. Sending bodies on these + // methods without an associated ContentLength will cause the + // request to socket timeout because the server does not handle + // Transfer-Encoding: chunked bodies for these methods. + // + // This would only happen if a aws.ReaderSeekerCloser was used with + // a io.Reader that was not also an io.Seeker, or did not implement + // Len() method. + switch r.Operation.HTTPMethod { + case "GET", "HEAD", "DELETE": + body = NoBody + default: + body = r.safeBody + } + } + + return body, nil +} + +// GetBody will return an io.ReadSeeker of the Request's underlying +// input body with a concurrency safe wrapper. +func (r *Request) GetBody() io.ReadSeeker { + return r.safeBody +} + +// Send will send the request, returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +// +// Canceling a request is non-deterministic. 
If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. +// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go +// +// Send will not close the request.Request's body. +func (r *Request) Send() error { + defer func() { + // Ensure a non-nil HTTPResponse parameter is set to ensure handlers + // checking for HTTPResponse values, don't fail. + if r.HTTPResponse == nil { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + Body: ioutil.NopCloser(&bytes.Buffer{}), + } + } + // Regardless of success or failure of the request trigger the Complete + // request handlers. + r.Handlers.Complete.Run(r) + }() + + if err := r.Error; err != nil { + return err + } + + for { + r.Error = nil + r.AttemptTime = time.Now() + + if err := r.Sign(); err != nil { + debugLogReqError(r, "Sign Request", notRetrying, err) + return err + } + + if err := r.sendRequest(); err == nil { + return nil + } + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error + } + + if err := r.prepareRetry(); err != nil { + r.Error = err + return err + } + } +} + +func (r *Request) prepareRetry() error { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + if err := r.Error; err != nil { + return awserr.New(ErrCodeSerialization, + "failed to prepare body for retry", err) + + } + + // Closing response body to ensure that no response body is leaked + // between retry attempts. + if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } + + return nil +} + +func (r *Request) sendRequest() (sendErr error) { + defer r.Handlers.CompleteAttempt.Run(r) + + r.Retryable = nil + r.Handlers.Send.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + debugLogReqError(r, "Validate Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + return nil +} + +// copy will copy a request which will allow for local manipulation of the +// request. +func (r *Request) copy() *Request { + req := &Request{} + *req = *r + req.Handlers = r.Handlers.Copy() + op := *r.Operation + req.Operation = &op + return req +} + +// AddToUserAgent adds the string to the end of the request's current user agent. 
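The Send loop above is the core of the retry machinery: sign, attempt, let the Retry and AfterRetry handlers classify the failure, and loop only while Retryable is set and attempts remain. A compressed, hedged sketch of that control flow, standalone and not the SDK's actual implementation:

package main

import (
	"errors"
	"fmt"
)

// send mirrors the shape of Request.Send: attempt, classify, and loop
// until success, a terminal error, or retries are exhausted.
func send(maxRetries int, attempt func() error, retryable func(error) bool) error {
	for retries := 0; ; retries++ {
		err := attempt()
		if err == nil {
			return nil
		}
		if retries >= maxRetries || !retryable(err) {
			return err
		}
	}
}

func main() {
	tries := 0
	err := send(3,
		func() error {
			tries++
			if tries < 3 {
				return errors.New("RequestTimeout") // transient failure
			}
			return nil
		},
		func(err error) bool { return err.Error() == "RequestTimeout" },
	)
	fmt.Println(tries, err) // 3 <nil>
}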
+func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + if r.URL == nil { + return "" + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go new file mode 100644 index 000000000..5921b8ff2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -0,0 +1,40 @@ +//go:build !go1.8 +// +build !go1.8 + +package request + +import "io" + +// NoBody is an io.ReadCloser with no bytes. Read always returns EOF +// and Close always returns nil. It can be used in an outgoing client +// request to explicitly signal that a request has zero bytes. +// An alternative, however, is to simply set Request.Body to nil. +// +// Copy of Go 1.8 NoBody type from net/http/http.go +type noBody struct{} + +func (noBody) Read([]byte) (int, error) { return 0, io.EOF } +func (noBody) Close() error { return nil } +func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } + +// NoBody is an empty reader that will trigger the Go HTTP client to not include +// and body in the HTTP request. +var NoBody = noBody{} + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. 
SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = err + return + } + + r.HTTPRequest.Body = body +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go new file mode 100644 index 000000000..ea643c9c4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -0,0 +1,37 @@ +//go:build go1.8 +// +build go1.8 + +package request + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// NoBody is a http.NoBody reader instructing Go HTTP client to not include +// and body in the HTTP request. +var NoBody = http.NoBody + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +// +// Will also set the Go 1.8's http.Request.GetBody member to allow retrying +// PUT/POST redirects. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to reset request body", err) + return + } + + r.HTTPRequest.Body = body + r.HTTPRequest.GetBody = r.getNextRequestBody +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go new file mode 100644 index 000000000..d8c505302 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go @@ -0,0 +1,15 @@ +//go:build go1.7 +// +build go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest = r.HTTPRequest.WithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go new file mode 100644 index 000000000..49a243ef2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go @@ -0,0 +1,15 @@ +//go:build !go1.7 +// +build !go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. 
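A key detail in the go1.8 ResetBody above is wiring getNextRequestBody into http.Request.GetBody, the hook net/http uses to replay a body across redirects and retries. A small standard-library-only illustration of that contract:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	req, _ := http.NewRequest("PUT", "https://example.com/upload", strings.NewReader("hello"))

	// http.NewRequest recognizes *strings.Reader and fills in GetBody;
	// the SDK installs getNextRequestBody for the same purpose.
	b1, _ := req.GetBody()
	first, _ := ioutil.ReadAll(b1)

	b2, _ := req.GetBody() // a second, independent copy of the body
	second, _ := ioutil.ReadAll(b2)

	fmt.Println(string(first), string(second)) // hello hello
}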
+func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest.Cancel = ctx.Done() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 000000000..64784e16f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,266 @@ +package request + +import ( + "reflect" + "sync/atomic" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// A Pagination provides paginating of SDK API operations which are paginatable. +// Generally you should not use this type directly, but use the "Pages" API +// operations method to automatically perform pagination for you. Such as, +// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. +// +// Pagination differs from a Paginator type in that pagination is the type that +// does the pagination between API operations, and Paginator defines the +// configuration that will be used per page request. +// +// for p.Next() { +// data := p.Page().(*s3.ListObjectsOutput) +// // process the page's data +// // ... +// // break out of loop to stop fetching additional pages +// } +// +// return p.Err() +// +// See service client API operation Pages methods for examples how the SDK will +// use the Pagination type. +type Pagination struct { + // Function to return a Request value for each pagination request. + // Any configuration or handlers that need to be applied to the request + // prior to getting the next page should be done here before the request + // returned. + // + // NewRequest should always be built from the same API operations. It is + // undefined if different API operations are returned on subsequent calls. + NewRequest func() (*Request, error) + // EndPageOnSameToken, when enabled, will allow the paginator to stop on + // token that are the same as its previous tokens. + EndPageOnSameToken bool + + started bool + prevTokens []interface{} + nextTokens []interface{} + + err error + curPage interface{} +} + +// HasNextPage will return true if Pagination is able to determine that the API +// operation has additional pages. False will be returned if there are no more +// pages remaining. +// +// Will always return true if Next has not been called yet. +func (p *Pagination) HasNextPage() bool { + if !p.started { + return true + } + + hasNextPage := len(p.nextTokens) != 0 + if p.EndPageOnSameToken { + return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) + } + return hasNextPage +} + +// Err returns the error Pagination encountered when retrieving the next page. +func (p *Pagination) Err() error { + return p.err +} + +// Page returns the current page. Page should only be called after a successful +// call to Next. It is undefined what Page will return if Page is called after +// Next returns false. +func (p *Pagination) Page() interface{} { + return p.curPage +} + +// Next will attempt to retrieve the next page for the API operation. When a page +// is retrieved true will be returned. If the page cannot be retrieved, or there +// are no more pages false will be returned. +// +// Use the Page method to retrieve the current page data. The data will need +// to be cast to the API operation's output type. +// +// Use the Err method to determine if an error occurred if Page returns false. 
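For orientation, a hedged usage sketch of the Pagination type above against the vendored S3 client; the region and bucket name are purely illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	p := request.Pagination{
		NewRequest: func() (*request.Request, error) {
			req, _ := svc.ListObjectsV2Request(&s3.ListObjectsV2Input{
				Bucket: aws.String("example-bucket"), // illustrative bucket name
			})
			return req, nil
		},
	}
	for p.Next() {
		page := p.Page().(*s3.ListObjectsV2Output)
		fmt.Println("keys in this page:", len(page.Contents))
	}
	if err := p.Err(); err != nil {
		fmt.Println("pagination failed:", err)
	}
}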
+func (p *Pagination) Next() bool { + if !p.HasNextPage() { + return false + } + + req, err := p.NewRequest() + if err != nil { + p.err = err + return false + } + + if p.started { + for i, intok := range req.Operation.InputTokens { + awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) + } + } + p.started = true + + err = req.Send() + if err != nil { + p.err = err + return false + } + + p.prevTokens = p.nextTokens + p.nextTokens = req.nextPageTokens() + p.curPage = req.Data + + return true +} + +// A Paginator is the configuration data that defines how an API operation +// should be paginated. This type is used by the API service models to define +// the generated pagination config for service APIs. +// +// The Pagination type is what provides iterating between pages of an API. It +// is only used to store the token metadata the SDK should use for performing +// pagination. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// nextPageTokens returns the tokens to use when asking for the next page of data. +func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if !v { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + vs, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(vs) == 0 { + tokens = append(tokens, nil) + continue + } + v := vs[0] + + switch tv := v.(type) { + case *string: + if len(aws.StringValue(tv)) == 0 { + tokens = append(tokens, nil) + continue + } + case string: + if len(tv) == 0 { + tokens = append(tokens, nil) + continue + } + } + + tokenAdded = true + tokens = append(tokens, v) + } + if !tokenAdded { + return nil + } + + return tokens +} + +// Ensure a deprecated item is only logged once instead of each time its used. +func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { + if logger == nil { + return + } + if atomic.CompareAndSwapInt32(flag, 0, 1) { + logger.Log(msg) + } +} + +var ( + logDeprecatedHasNextPage int32 + logDeprecatedNextPage int32 + logDeprecatedEachPage int32 +) + +// HasNextPage returns true if this request has more pages of data available. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) HasNextPage() bool { + logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, + "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") + + return len(r.nextPageTokens()) > 0 +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) NextPage() *Request { + logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, + "Request.NextPage deprecated. 
Use Pagination type for configurable pagination of API operations") + + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, + "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations") + + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 000000000..3f0001f91 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,309 @@ +package request + +import ( + "net" + "net/url" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer provides the interface drive the SDK's request retry behavior. The +// Retryer implementation is responsible for implementing exponential backoff, +// and determine if a request API error should be retried. +// +// client.DefaultRetryer is the SDK's default implementation of the Retryer. It +// uses the Request.IsErrorRetryable and Request.IsErrorThrottle methods to +// determine if the request is retried. +type Retryer interface { + // RetryRules return the retry delay that should be used by the SDK before + // making another request attempt for the failed request. + RetryRules(*Request) time.Duration + + // ShouldRetry returns if the failed request is retryable. + // + // Implementations may consider request attempt count when determining if a + // request is retryable, but the SDK will use MaxRetries to limit the + // number of attempts a request are made. + ShouldRetry(*Request) bool + + // MaxRetries is the number of times a request may be retried before + // failing. + MaxRetries() int +} + +// WithRetryer sets a Retryer value to the given Config returning the Config +// value for chaining. The value must not be nil. +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + if retryer == nil { + if cfg.Logger != nil { + cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. 
Replacing with retry disabled Retryer.")
+		}
+		retryer = noOpRetryer{}
+	}
+	cfg.Retryer = retryer
+	return cfg
+
+}
+
+// noOpRetryer is an internal no-op retryer used when a request is created
+// without a retryer.
+//
+// Provides a retryer that performs no retries.
+// It should be used when we do not want retries to be performed.
+type noOpRetryer struct{}
+
+// MaxRetries returns the maximum number of retries the SDK will make for an
+// individual API request; for noOpRetryer this is always zero.
+func (d noOpRetryer) MaxRetries() int {
+	return 0
+}
+
+// ShouldRetry will always return false for noOpRetryer, as it should never retry.
+func (d noOpRetryer) ShouldRetry(_ *Request) bool {
+	return false
+}
+
+// RetryRules returns the delay duration before retrying this request again;
+// since noOpRetryer does not retry, RetryRules always returns 0.
+func (d noOpRetryer) RetryRules(_ *Request) time.Duration {
+	return 0
+}
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
+var retryableCodes = map[string]struct{}{
+	ErrCodeRequestError:       {},
+	"RequestTimeout":          {},
+	ErrCodeResponseTimeout:    {},
+	"RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout
+}
+
+var throttleCodes = map[string]struct{}{
+	"ProvisionedThroughputExceededException": {},
+	"ThrottledException":                     {}, // SNS, XRay, ResourceGroupsTagging API
+	"Throttling":                             {},
+	"ThrottlingException":                    {},
+	"RequestLimitExceeded":                   {},
+	"RequestThrottled":                       {},
+	"RequestThrottledException":              {},
+	"TooManyRequestsException":               {}, // Lambda functions
+	"PriorRequestNotComplete":                {}, // Route53
+	"TransactionInProgressException":         {},
+	"EC2ThrottledException":                  {}, // EC2
+}
+
+// credsExpiredCodes is a collection of error codes which signify the credentials
+// need to be refreshed. Expired tokens require refreshing of credentials, and
+// resigning before the request can be retried.
+var credsExpiredCodes = map[string]struct{}{
+	"ExpiredToken":          {},
+	"ExpiredTokenException": {},
+	"RequestExpired":        {}, // EC2 Only
+}
+
+func isCodeThrottle(code string) bool {
+	_, ok := throttleCodes[code]
+	return ok
+}
+
+func isCodeRetryable(code string) bool {
+	if _, ok := retryableCodes[code]; ok {
+		return true
+	}
+
+	return isCodeExpiredCreds(code)
+}
+
+func isCodeExpiredCreds(code string) bool {
+	_, ok := credsExpiredCodes[code]
+	return ok
+}
+
+var validParentCodes = map[string]struct{}{
+	ErrCodeSerialization: {},
+	ErrCodeRead:          {},
+}
+
+func isNestedErrorRetryable(parentErr awserr.Error) bool {
+	if parentErr == nil {
+		return false
+	}
+
+	if _, ok := validParentCodes[parentErr.Code()]; !ok {
+		return false
+	}
+
+	err := parentErr.OrigErr()
+	if err == nil {
+		return false
+	}
+
+	if aerr, ok := err.(awserr.Error); ok {
+		return isCodeRetryable(aerr.Code())
+	}
+
+	if t, ok := err.(temporary); ok {
+		return t.Temporary() || isErrConnectionReset(err)
+	}
+
+	return isErrConnectionReset(err)
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if error is nil.
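noOpRetryer above is the smallest possible Retryer; a hedged sketch of a slightly richer implementation follows, using a constant backoff and deferring to the request's own error classification. fixedRetryer and its fields are illustrative names, not SDK API:

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
)

// fixedRetryer retries up to max times with a constant delay.
type fixedRetryer struct {
	max   int
	delay time.Duration
}

func (f fixedRetryer) MaxRetries() int { return f.max }

func (f fixedRetryer) RetryRules(*request.Request) time.Duration { return f.delay }

func (f fixedRetryer) ShouldRetry(r *request.Request) bool {
	// Defer to the SDK's own classification of retryable and throttled errors.
	return r.IsErrorRetryable() || r.IsErrorThrottle()
}

func main() {
	// WithRetryer installs the custom retryer on a config, which can then
	// be passed to a service client constructor.
	cfg := request.WithRetryer(aws.NewConfig(), fixedRetryer{max: 3, delay: 200 * time.Millisecond})
	_ = cfg
}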
+func IsErrorRetryable(err error) bool { + if err == nil { + return false + } + return shouldRetryError(err) +} + +type temporary interface { + Temporary() bool +} + +func shouldRetryError(origErr error) bool { + switch err := origErr.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false + } + if isNestedErrorRetryable(err) { + return true + } + + origErr := err.OrigErr() + var shouldRetry bool + if origErr != nil { + shouldRetry = shouldRetryError(origErr) + if err.Code() == ErrCodeRequestError && !shouldRetry { + return false + } + } + if isCodeRetryable(err.Code()) { + return true + } + return shouldRetry + + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryError(err.Err) + + case temporary: + if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { + return true + } + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() || isErrConnectionReset(origErr) + + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retryable. See + // TestRequest4xxUnretryable for an example. + return true + + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known 1.5 error case when an http request is cancelled + return false + } + // here we don't know the error; so we allow a retry. + return true + } +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if error is nil. +func IsErrorThrottle(err error) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeThrottle(aerr.Code()) + } + return false +} + +// IsErrorExpiredCreds returns whether the error code is a credential expiry +// error. Returns false if error is nil. +func IsErrorExpiredCreds(err error) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeExpiredCreds(aerr.Code()) + } + return false +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + if isErrCode(r.Error, r.RetryErrorCodes) { + return true + } + + // HTTP response status code 501 should not be retried. + // 501 represents Not Implemented which means the request method is not + // supported by the server and cannot be handled. + if r.HTTPResponse != nil { + // HTTP response status code 500 represents internal server error and + // should be retried without any throttle. + if r.HTTPResponse.StatusCode == 500 { + return true + } + } + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its +// code. Returns false if the request has no Error set. 
+// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + if isErrCode(r.Error, r.ThrottleErrorCodes) { + return true + } + + if r.HTTPResponse != nil { + switch r.HTTPResponse.StatusCode { + case + 429, // error caused due to too many requests + 502, // Bad Gateway error should be throttled + 503, // caused when service is unavailable + 504: // error occurred due to gateway timeout + return true + } + } + + return IsErrorThrottle(r.Error) +} + +func isErrCode(err error, codes []string) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + for _, code := range codes { + if code == aerr.Code() { + return true + } + } + } + + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorExpiredCreds +func (r *Request) IsErrorExpired() bool { + return IsErrorExpiredCreds(r.Error) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go new file mode 100644 index 000000000..09a44eb98 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go @@ -0,0 +1,94 @@ +package request + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var timeoutErr = awserr.New( + ErrCodeResponseTimeout, + "read on body has reached the timeout limit", + nil, +) + +type readResult struct { + n int + err error +} + +// timeoutReadCloser will handle body reads that take too long. +// We will return a ErrReadTimeout error if a timeout occurs. +type timeoutReadCloser struct { + reader io.ReadCloser + duration time.Duration +} + +// Read will spin off a goroutine to call the reader's Read method. We will +// select on the timer's channel or the read's channel. Whoever completes first +// will be returned. +func (r *timeoutReadCloser) Read(b []byte) (int, error) { + timer := time.NewTimer(r.duration) + c := make(chan readResult, 1) + + go func() { + n, err := r.reader.Read(b) + timer.Stop() + c <- readResult{n: n, err: err} + }() + + select { + case data := <-c: + return data.n, data.err + case <-timer.C: + return 0, timeoutErr + } +} + +func (r *timeoutReadCloser) Close() error { + return r.reader.Close() +} + +const ( + // HandlerResponseTimeout is what we use to signify the name of the + // response timeout handler. + HandlerResponseTimeout = "ResponseTimeoutHandler" +) + +// adaptToResponseTimeoutError is a handler that will replace any top level error +// to a ErrCodeResponseTimeout, if its child is that. +func adaptToResponseTimeoutError(req *Request) { + if err, ok := req.Error.(awserr.Error); ok { + aerr, ok := err.OrigErr().(awserr.Error) + if ok && aerr.Code() == ErrCodeResponseTimeout { + req.Error = aerr + } + } +} + +// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. +// This will allow for per read timeouts. If a timeout occurred, we will return the +// ErrCodeResponseTimeout. +// +// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second) +func WithResponseReadTimeout(duration time.Duration) Option { + return func(r *Request) { + + var timeoutHandler = NamedHandler{ + HandlerResponseTimeout, + func(req *Request) { + req.HTTPResponse.Body = &timeoutReadCloser{ + reader: req.HTTPResponse.Body, + duration: duration, + } + }} + + // remove the handler so we are not stomping over any new durations. 
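+	// Re-registering by name keeps this option idempotent: applying
+	// WithResponseReadTimeout twice replaces the previous timeout handler
+	// instead of wrapping the response body a second time.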
+ r.Handlers.Send.RemoveByName(HandlerResponseTimeout) + r.Handlers.Send.PushBackNamed(timeoutHandler) + + r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) + r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go new file mode 100644 index 000000000..8630683f3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -0,0 +1,286 @@ +package request + +import ( + "bytes" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // InvalidParameterErrCode is the error code for invalid parameters errors + InvalidParameterErrCode = "InvalidParameter" + // ParamRequiredErrCode is the error code for required parameter errors + ParamRequiredErrCode = "ParamRequiredError" + // ParamMinValueErrCode is the error code for fields with too low of a + // number value. + ParamMinValueErrCode = "ParamMinValueError" + // ParamMinLenErrCode is the error code for fields without enough elements. + ParamMinLenErrCode = "ParamMinLenError" + // ParamMaxLenErrCode is the error code for value being too long. + ParamMaxLenErrCode = "ParamMaxLenError" + + // ParamFormatErrCode is the error code for a field with invalid + // format or characters. + ParamFormatErrCode = "ParamFormatInvalidError" +) + +// Validator provides a way for types to perform validation logic on their +// input values that external code can use to determine if a type's values +// are valid. +type Validator interface { + Validate() error +} + +// An ErrInvalidParams provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type ErrInvalidParams struct { + // Context is the base context of the invalid parameter group. + Context string + errs []ErrInvalidParam +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. +func (e *ErrInvalidParams) Add(err ErrInvalidParam) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another ErrInvalidParams +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. +func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e ErrInvalidParams) Len() int { + return len(e.errs) +} + +// Code returns the code of the error +func (e ErrInvalidParams) Code() string { + return InvalidParameterErrCode +} + +// Message returns the message of the error +func (e ErrInvalidParams) Message() string { + return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) +} + +// Error returns the string formatted form of the invalid parameters. 
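These types back the Validate methods the SDK generates for operation inputs. A hedged sketch of that usage, where PutWidgetInput is a hypothetical input shape and the constructors come from later in this file:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

// PutWidgetInput is a hypothetical input type, not a real SDK shape.
type PutWidgetInput struct {
	Name *string
}

// Validate mirrors the structure of the SDK's generated validators.
func (in *PutWidgetInput) Validate() error {
	params := request.ErrInvalidParams{Context: "PutWidgetInput"}
	if in.Name == nil {
		params.Add(request.NewErrParamRequired("Name"))
	} else if len(*in.Name) < 1 {
		params.Add(request.NewErrParamMinLen("Name", 1))
	}
	if params.Len() > 0 {
		return params // satisfies both error and awserr.Error
	}
	return nil
}

func main() {
	err := (&PutWidgetInput{}).Validate()
	fmt.Println(err) // prints the InvalidParameter summary plus per-field messages
}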
+func (e ErrInvalidParams) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Message()) + } + + return w.String() +} + +// OrigErr returns the invalid parameters as a awserr.BatchedErrors value +func (e ErrInvalidParams) OrigErr() error { + return awserr.NewBatchError( + InvalidParameterErrCode, e.Message(), e.OrigErrs()) +} + +// OrigErrs returns a slice of the invalid parameters +func (e ErrInvalidParams) OrigErrs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An ErrInvalidParam represents an invalid parameter error type. +type ErrInvalidParam interface { + awserr.Error + + // Field name the error occurred on. + Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type errInvalidParam struct { + context string + nestedContext string + field string + code string + msg string +} + +// Code returns the error code for the type of invalid parameter. +func (e *errInvalidParam) Code() string { + return e.code +} + +// Message returns the reason the parameter was invalid, and its context. +func (e *errInvalidParam) Message() string { + return fmt.Sprintf("%s, %s.", e.msg, e.Field()) +} + +// Error returns the string version of the invalid parameter error. +func (e *errInvalidParam) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.Message()) +} + +// OrigErr returns nil, Implemented for awserr.Error interface. +func (e *errInvalidParam) OrigErr() error { + return nil +} + +// Field Returns the field and context the error occurred. +func (e *errInvalidParam) Field() string { + field := e.context + if len(field) > 0 { + field += "." + } + if len(e.nestedContext) > 0 { + field += fmt.Sprintf("%s.", e.nestedContext) + } + field += e.field + + return field +} + +// SetContext updates the base context of the error. +func (e *errInvalidParam) SetContext(ctx string) { + e.context = ctx +} + +// AddNestedContext prepends a context to the field's path. +func (e *errInvalidParam) AddNestedContext(ctx string) { + if len(e.nestedContext) == 0 { + e.nestedContext = ctx + } else { + e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) + } + +} + +// An ErrParamRequired represents an required parameter error. +type ErrParamRequired struct { + errInvalidParam +} + +// NewErrParamRequired creates a new required parameter error. +func NewErrParamRequired(field string) *ErrParamRequired { + return &ErrParamRequired{ + errInvalidParam{ + code: ParamRequiredErrCode, + field: field, + msg: fmt.Sprintf("missing required field"), + }, + } +} + +// An ErrParamMinValue represents a minimum value parameter error. +type ErrParamMinValue struct { + errInvalidParam + min float64 +} + +// NewErrParamMinValue creates a new minimum value parameter error. +func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { + return &ErrParamMinValue{ + errInvalidParam: errInvalidParam{ + code: ParamMinValueErrCode, + field: field, + msg: fmt.Sprintf("minimum field value of %v", min), + }, + min: min, + } +} + +// MinValue returns the field's require minimum value. +// +// float64 is returned for both int and float min values. +func (e *ErrParamMinValue) MinValue() float64 { + return e.min +} + +// An ErrParamMinLen represents a minimum length parameter error. 
+type ErrParamMinLen struct {
+	errInvalidParam
+	min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+	return &ErrParamMinLen{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMinLenErrCode,
+			field: field,
+			msg:   fmt.Sprintf("minimum field size of %v", min),
+		},
+		min: min,
+	}
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+	return e.min
+}
+
+// An ErrParamMaxLen represents a maximum length parameter error.
+type ErrParamMaxLen struct {
+	errInvalidParam
+	max int
+}
+
+// NewErrParamMaxLen creates a new maximum length parameter error.
+func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen {
+	return &ErrParamMaxLen{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMaxLenErrCode,
+			field: field,
+			msg:   fmt.Sprintf("maximum size of %v, %v", max, value),
+		},
+		max: max,
+	}
+}
+
+// MaxLen returns the field's required maximum length.
+func (e *ErrParamMaxLen) MaxLen() int {
+	return e.max
+}
+
+// An ErrParamFormat represents an invalid format parameter error.
+type ErrParamFormat struct {
+	errInvalidParam
+	format string
+}
+
+// NewErrParamFormat creates a new invalid format parameter error.
+func NewErrParamFormat(field string, format, value string) *ErrParamFormat {
+	return &ErrParamFormat{
+		errInvalidParam: errInvalidParam{
+			code:  ParamFormatErrCode,
+			field: field,
+			msg:   fmt.Sprintf("format %v, %v", format, value),
+		},
+		format: format,
+	}
+}
+
+// Format returns the field's required format.
+func (e *ErrParamFormat) Format() string {
+	return e.format
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
new file mode 100644
index 000000000..992ed0464
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
@@ -0,0 +1,304 @@
+package request
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
+// the waiter's max attempts have been exhausted.
+const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
+
+// A WaiterOption is a function that will update the Waiter value's fields to
+// configure the waiter.
+type WaiterOption func(*Waiter)
+
+// WithWaiterMaxAttempts returns a WaiterOption that sets the maximum number of
+// times the waiter should check the resource for the target state.
+func WithWaiterMaxAttempts(max int) WaiterOption {
+	return func(w *Waiter) {
+		w.MaxAttempts = max
+	}
+}
+
+// WaiterDelay will return a delay the waiter should pause between attempts to
+// check the resource state. The passed in attempt is the number of times the
+// Waiter has checked the resource state.
+type WaiterDelay func(attempt int) time.Duration
+
+// ConstantWaiterDelay returns a WaiterDelay that will always return a constant
+// delay the waiter should use between attempts. It ignores the number of
+// attempts made.
+func ConstantWaiterDelay(delay time.Duration) WaiterDelay {
+	return func(attempt int) time.Duration {
+		return delay
+	}
+}
+
+// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in.
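ConstantWaiterDelay above ignores the attempt number, but because WaiterDelay receives it, an attempt-aware schedule is easy to add. A hedged sketch of an exponential variant; expWaiterDelay is an illustrative name, not part of the SDK. It would plug in through the WithWaiterDelay option defined next:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/request"
)

// expWaiterDelay doubles the pause after each attempt: base, 2*base, 4*base, ...
func expWaiterDelay(base time.Duration) request.WaiterDelay {
	return func(attempt int) time.Duration {
		d := base
		for i := 1; i < attempt; i++ {
			d *= 2
		}
		return d
	}
}

func main() {
	delay := expWaiterDelay(500 * time.Millisecond)
	fmt.Println(delay(1), delay(3)) // 500ms 2s
}

A waiter would adopt it via w.ApplyOptions(request.WithWaiterDelay(expWaiterDelay(500 * time.Millisecond))).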
+func WithWaiterDelay(delayer WaiterDelay) WaiterOption { + return func(w *Waiter) { + w.Delay = delayer + } +} + +// WithWaiterLogger returns a waiter option to set the logger a waiter +// should use to log warnings and errors to. +func WithWaiterLogger(logger aws.Logger) WaiterOption { + return func(w *Waiter) { + w.Logger = logger + } +} + +// WithWaiterRequestOptions returns a waiter option setting the request +// options for each request the waiter makes. Appends to waiter's request +// options already set. +func WithWaiterRequestOptions(opts ...Option) WaiterOption { + return func(w *Waiter) { + w.RequestOptions = append(w.RequestOptions, opts...) + } +} + +// A Waiter provides the functionality to perform a blocking call which will +// wait for a resource state to be satisfied by a service. +// +// This type should not be used directly. The API operations provided in the +// service packages prefixed with "WaitUntil" should be used instead. +type Waiter struct { + Name string + Acceptors []WaiterAcceptor + Logger aws.Logger + + MaxAttempts int + Delay WaiterDelay + + RequestOptions []Option + NewRequest func([]Option) (*Request, error) + SleepWithContext func(aws.Context, time.Duration) error +} + +// ApplyOptions updates the waiter with the list of waiter options provided. +func (w *Waiter) ApplyOptions(opts ...WaiterOption) { + for _, fn := range opts { + fn(w) + } +} + +// WaiterState are states the waiter uses based on WaiterAcceptor definitions +// to identify if the resource state the waiter is waiting on has occurred. +type WaiterState int + +// String returns the string representation of the waiter state. +func (s WaiterState) String() string { + switch s { + case SuccessWaiterState: + return "success" + case FailureWaiterState: + return "failure" + case RetryWaiterState: + return "retry" + default: + return "unknown waiter state" + } +} + +// States the waiter acceptors will use to identify target resource states. +const ( + SuccessWaiterState WaiterState = iota // waiter successful + FailureWaiterState // waiter failed + RetryWaiterState // waiter needs to be retried +) + +// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor +// definition's Expected attribute. +type WaiterMatchMode int + +// Modes the waiter will use when inspecting API response to identify target +// resource states. +const ( + PathAllWaiterMatch WaiterMatchMode = iota // match on all paths + PathWaiterMatch // match on specific path + PathAnyWaiterMatch // match on any path + PathListWaiterMatch // match on list of paths + StatusWaiterMatch // match on status code + ErrorWaiterMatch // match on error +) + +// String returns the string representation of the waiter match mode. +func (m WaiterMatchMode) String() string { + switch m { + case PathAllWaiterMatch: + return "pathAll" + case PathWaiterMatch: + return "path" + case PathAnyWaiterMatch: + return "pathAny" + case PathListWaiterMatch: + return "pathList" + case StatusWaiterMatch: + return "status" + case ErrorWaiterMatch: + return "error" + default: + return "unknown waiter match mode" + } +} + +// WaitWithContext will make requests for the API operation using NewRequest to +// build API requests. The request's response will be compared against the +// Waiter's Acceptors to determine the successful state of the resource the +// waiter is inspecting. +// +// The passed in context must not be nil. If it is nil a panic will occur. The +// Context will be used to cancel the waiter's pending requests and retry delays. 
+// Use aws.BackgroundContext if no context is available.
+//
+// The waiter will continue until the target state defined by the Acceptors is
+// reached, or the max attempts are exhausted.
+//
+// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's
+// retryer ShouldRetry returns false. This normally will happen when the max
+// wait attempts are exhausted.
+func (w Waiter) WaitWithContext(ctx aws.Context) error {
+
+	for attempt := 1; ; attempt++ {
+		req, err := w.NewRequest(w.RequestOptions)
+		if err != nil {
+			waiterLogf(w.Logger, "unable to create request %v", err)
+			return err
+		}
+		req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter"))
+		err = req.Send()
+
+		// See if any of the acceptors match the request's response, or error
+		for _, a := range w.Acceptors {
+			if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched {
+				return matchErr
+			}
+		}
+
+		// The Waiter should only check the resource state MaxAttempts times
+		// This is here instead of in the for loop above to prevent delaying
+		// unnecessarily when the waiter will not retry.
+		if attempt == w.MaxAttempts {
+			break
+		}
+
+		// Delay to wait before inspecting the resource again
+		delay := w.Delay(attempt)
+		if sleepFn := req.Config.SleepDelay; sleepFn != nil {
+			// Support SleepDelay for backwards compatibility and testing
+			sleepFn(delay)
+		} else {
+			sleepCtxFn := w.SleepWithContext
+			if sleepCtxFn == nil {
+				sleepCtxFn = aws.SleepWithContext
+			}
+
+			if err := sleepCtxFn(ctx, delay); err != nil {
+				return awserr.New(CanceledErrorCode, "waiter context canceled", err)
+			}
+		}
+	}
+
+	return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil)
+}
+
+// A WaiterAcceptor provides the information needed to wait for an API operation
+// to complete.
+type WaiterAcceptor struct {
+	State    WaiterState
+	Matcher  WaiterMatchMode
+	Argument string
+	Expected interface{}
+}
+
+// match returns if the acceptor found a match with the passed in request
+// or error. True is returned if the acceptor made a match, error is returned
+// if there was an error attempting to perform the match.
+func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) {
+	result := false
+	var vals []interface{}
+
+	switch a.Matcher {
+	case PathAllWaiterMatch, PathWaiterMatch:
+		// Require all matches to be equal for result to match
+		vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+		if len(vals) == 0 {
+			break
+		}
+		result = true
+		for _, val := range vals {
+			if !awsutil.DeepEqual(val, a.Expected) {
+				result = false
+				break
+			}
+		}
+	case PathAnyWaiterMatch:
+		// Only a single match needs to equal for the result to match
+		vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+		for _, val := range vals {
+			if awsutil.DeepEqual(val, a.Expected) {
+				result = true
+				break
+			}
+		}
+	case PathListWaiterMatch:
+		// ignored matcher
+	case StatusWaiterMatch:
+		s := a.Expected.(int)
+		result = s == req.HTTPResponse.StatusCode
+	case ErrorWaiterMatch:
+		switch ex := a.Expected.(type) {
+		case string:
+			if aerr, ok := err.(awserr.Error); ok {
+				result = aerr.Code() == ex
+			}
+		case bool:
+			if ex {
+				result = err != nil
+			} else {
+				result = err == nil
+			}
+		}
+	default:
+		waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
+			name, a.Matcher)
+	}
+
+	if !result {
+		// If there was no matching result found there is nothing more to do
+		// for this response, retry the request.
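+		// Returning (false, nil) tells WaitWithContext that no acceptor has
+		// matched yet, so the waiter should delay and poll the resource again.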
+ return false, nil + } + + switch a.State { + case SuccessWaiterState: + // waiter completed + return true, nil + case FailureWaiterState: + // Waiter failure state triggered + return true, awserr.New(WaiterResourceNotReadyErrorCode, + "failed waiting for successful resource state", err) + case RetryWaiterState: + // clear the error and retry the operation + return false, nil + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", + name, a.State) + return false, nil + } +} + +func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { + if logger != nil { + logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go new file mode 100644 index 000000000..ea8e35376 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -0,0 +1,333 @@ +package session + +import ( + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/processcreds" + "github.com/aws/aws-sdk-go/aws/credentials/ssocreds" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" + "github.com/aws/aws-sdk-go/service/ssooidc" + "github.com/aws/aws-sdk-go/service/sts" +) + +// CredentialsProviderOptions specifies additional options for configuring +// credentials providers. +type CredentialsProviderOptions struct { + // WebIdentityRoleProviderOptions configures a WebIdentityRoleProvider, + // such as setting its ExpiryWindow. + WebIdentityRoleProviderOptions func(*stscreds.WebIdentityRoleProvider) + + // ProcessProviderOptions configures a ProcessProvider, + // such as setting its Timeout. + ProcessProviderOptions func(*processcreds.ProcessProvider) +} + +func resolveCredentials(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (*credentials.Credentials, error) { + + switch { + case len(sessOpts.Profile) != 0: + // User explicitly provided a Profile in the session's configuration + // so load that profile from shared config first. + // Github(aws/aws-sdk-go#2727) + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + + case envCfg.Creds.HasKeys(): + // Environment credentials + return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil + + case len(envCfg.WebIdentityTokenFilePath) != 0: + // Web identity token from environment, RoleARN required to also be + // set. + return assumeWebIdentity(cfg, handlers, + envCfg.WebIdentityTokenFilePath, + envCfg.RoleARN, + envCfg.RoleSessionName, + sessOpts.CredentialsProviderOptions, + ) + + default: + // Fallback to the "default" credential resolution chain. + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + } +} + +// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but +// 'AWS_ROLE_ARN' was not set. +var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) + +// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but +// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. 
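+// Both of these environment variables must be set together; assumeWebIdentity
+// below returns one of these errors when only one of them is present.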
+var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil)
+
+func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers,
+	filepath string,
+	roleARN, sessionName string,
+	credOptions *CredentialsProviderOptions,
+) (*credentials.Credentials, error) {
+
+	if len(filepath) == 0 {
+		return nil, WebIdentityEmptyTokenFilePathErr
+	}
+
+	if len(roleARN) == 0 {
+		return nil, WebIdentityEmptyRoleARNErr
+	}
+
+	svc := sts.New(&Session{
+		Config:   cfg,
+		Handlers: handlers.Copy(),
+	})
+
+	var optFns []func(*stscreds.WebIdentityRoleProvider)
+	if credOptions != nil && credOptions.WebIdentityRoleProviderOptions != nil {
+		optFns = append(optFns, credOptions.WebIdentityRoleProviderOptions)
+	}
+
+	p := stscreds.NewWebIdentityRoleProviderWithOptions(svc, roleARN, sessionName, stscreds.FetchTokenPath(filepath), optFns...)
+	return credentials.NewCredentials(p), nil
+}
+
+func resolveCredsFromProfile(cfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) (creds *credentials.Credentials, err error) {
+
+	switch {
+	case sharedCfg.SourceProfile != nil:
+		// Assume IAM role with credentials source from a different profile.
+		creds, err = resolveCredsFromProfile(cfg, envCfg,
+			*sharedCfg.SourceProfile, handlers, sessOpts,
+		)
+
+	case sharedCfg.Creds.HasKeys():
+		// Static Credentials from Shared Config/Credentials file.
+		creds = credentials.NewStaticCredentialsFromCreds(
+			sharedCfg.Creds,
+		)
+
+	case len(sharedCfg.CredentialSource) != 0:
+		creds, err = resolveCredsFromSource(cfg, envCfg,
+			sharedCfg, handlers, sessOpts,
+		)
+
+	case len(sharedCfg.WebIdentityTokenFile) != 0:
+		// Credentials from Assume Web Identity token require an IAM Role, and
+		// that role will be assumed. May be wrapped with another assume role
+		// via SourceProfile.
+		return assumeWebIdentity(cfg, handlers,
+			sharedCfg.WebIdentityTokenFile,
+			sharedCfg.RoleARN,
+			sharedCfg.RoleSessionName,
+			sessOpts.CredentialsProviderOptions,
+		)
+
+	case sharedCfg.hasSSOConfiguration():
+		creds, err = resolveSSOCredentials(cfg, sharedCfg, handlers)
+
+	case len(sharedCfg.CredentialProcess) != 0:
+		// Get credentials from CredentialProcess
+		var optFns []func(*processcreds.ProcessProvider)
+		if sessOpts.CredentialsProviderOptions != nil && sessOpts.CredentialsProviderOptions.ProcessProviderOptions != nil {
+			optFns = append(optFns, sessOpts.CredentialsProviderOptions.ProcessProviderOptions)
+		}
+		creds = processcreds.NewCredentials(sharedCfg.CredentialProcess, optFns...)
+
+	default:
+		// Fallback to default credentials provider, include mock errors for
+		// the credential chain so the user can identify why credentials
+		// failed to be retrieved.
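+		// The credProviderError entries below never return credentials; they
+		// exist so the chain reports a descriptive error for each skipped
+		// source (environment, shared profile) when resolution fails.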
+ creds = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{ + Err: awserr.New("EnvAccessKeyNotFound", + "failed to find credentials in the environment.", nil), + }, + &credProviderError{ + Err: awserr.New("SharedCredsLoad", + fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), + }, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }) + } + if err != nil { + return nil, err + } + + if len(sharedCfg.RoleARN) > 0 { + cfgCp := *cfg + cfgCp.Credentials = creds + return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) + } + + return creds, nil +} + +func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) (*credentials.Credentials, error) { + if err := sharedCfg.validateSSOConfiguration(); err != nil { + return nil, err + } + + var optFns []func(provider *ssocreds.Provider) + cfgCopy := cfg.Copy() + + if sharedCfg.SSOSession != nil { + cfgCopy.Region = &sharedCfg.SSOSession.SSORegion + cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedCfg.SSOSession.Name) + if err != nil { + return nil, err + } + // create oidcClient with AnonymousCredentials to avoid recursively resolving credentials + mySession := Must(NewSession(&aws.Config{ + Credentials: credentials.AnonymousCredentials, + })) + oidcClient := ssooidc.New(mySession, cfgCopy) + tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath) + optFns = append(optFns, func(p *ssocreds.Provider) { + p.TokenProvider = tokenProvider + p.CachedTokenFilepath = cachedPath + }) + } else { + cfgCopy.Region = &sharedCfg.SSORegion + } + + return ssocreds.NewCredentials( + &Session{ + Config: cfgCopy, + Handlers: handlers.Copy(), + }, + sharedCfg.SSOAccountID, + sharedCfg.SSORoleName, + sharedCfg.SSOStartURL, + optFns..., + ), nil +} + +// valid credential source values +const ( + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" +) + +func resolveCredsFromSource(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch sharedCfg.CredentialSource { + case credSourceEc2Metadata: + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + case credSourceEnvironment: + creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) + + case credSourceECSContainer: + if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { + return nil, ErrSharedConfigECSContainerEnvVarEmpty + } + + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + default: + return nil, ErrSharedConfigInvalidCredSource + } + + return creds, nil +} + +func credsFromAssumeRole(cfg aws.Config, + handlers request.Handlers, + sharedCfg sharedConfig, + sessOpts Options, +) (*credentials.Credentials, error) { + + if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. 
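+		// Callers typically avoid this error by supplying a token provider
+		// when creating the session, e.g. (a usage sketch mirroring the
+		// package documentation):
+		//
+		//	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		//		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+		//	}))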
+		return nil, AssumeRoleTokenProviderNotSetError{}
+	}
+
+	return stscreds.NewCredentials(
+		&Session{
+			Config:   &cfg,
+			Handlers: handlers.Copy(),
+		},
+		sharedCfg.RoleARN,
+		func(opt *stscreds.AssumeRoleProvider) {
+			opt.RoleSessionName = sharedCfg.RoleSessionName
+
+			if sessOpts.AssumeRoleDuration == 0 &&
+				sharedCfg.AssumeRoleDuration != nil &&
+				*sharedCfg.AssumeRoleDuration/time.Minute > 15 {
+				opt.Duration = *sharedCfg.AssumeRoleDuration
+			} else if sessOpts.AssumeRoleDuration != 0 {
+				opt.Duration = sessOpts.AssumeRoleDuration
+			}
+
+			// Assume role with external ID
+			if len(sharedCfg.ExternalID) > 0 {
+				opt.ExternalID = aws.String(sharedCfg.ExternalID)
+			}
+
+			// Assume role with MFA
+			if len(sharedCfg.MFASerial) > 0 {
+				opt.SerialNumber = aws.String(sharedCfg.MFASerial)
+				opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
+			}
+		},
+	), nil
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session if the MFAToken option is not set while the shared config is
+// configured to assume a role with an MFA token.
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Code is the short id of the error.
+func (e AssumeRoleTokenProviderNotSetError) Code() string {
+	return "AssumeRoleTokenProviderNotSetError"
+}
+
+// Message is the description of the error.
+func (e AssumeRoleTokenProviderNotSetError) Message() string {
+	return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set."
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
+	return nil
+}
+
+// Error satisfies the error interface.
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+type credProviderError struct {
+	Err error
+}
+
+func (c credProviderError) Retrieve() (credentials.Value, error) {
+	return credentials.Value{}, c.Err
+}
+func (c credProviderError) IsExpired() bool {
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go
new file mode 100644
index 000000000..4390ad52f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go
@@ -0,0 +1,28 @@
+//go:build go1.13
+// +build go1.13
+
+package session
+
+import (
+	"net"
+	"net/http"
+	"time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCustomTransport() *http.Transport {
+	return &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+			DualStack: true,
+		}).DialContext,
+		ForceAttemptHTTP2:     true,
+		MaxIdleConns:          100,
+		IdleConnTimeout:       90 * time.Second,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go
new file mode 100644
index 000000000..668565bea
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go
@@ -0,0 +1,27 @@
+//go:build !go1.13 && go1.7
+// +build !go1.13,go1.7
+
+package session
+
+import (
+	"net"
+	"net/http"
+	"time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
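+//
+// Unlike the go1.13+ variant in custom_transport.go, this transport cannot
+// set ForceAttemptHTTP2, as that http.Transport field was only added in Go 1.13.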
+func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go new file mode 100644 index 000000000..e101aa6b6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go @@ -0,0 +1,23 @@ +//go:build !go1.6 && go1.5 +// +build !go1.6,go1.5 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go new file mode 100644 index 000000000..b5fcbe0d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go @@ -0,0 +1,24 @@ +//go:build !go1.7 && go1.6 +// +build !go1.7,go1.6 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go new file mode 100644 index 000000000..ff3cc012a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -0,0 +1,367 @@ +/* +Package session provides configuration for the SDK's service clients. Sessions +can be shared across service clients that share the same base configuration. + +Sessions are safe to use concurrently as long as the Session is not being +modified. Sessions should be cached when possible, because creating a new +Session will load all configuration values from the environment, and config +files each time the Session is created. Sharing the Session value across all of +your service clients will ensure the configuration is loaded the fewest number +of times possible. + +Sessions options from Shared Config + +By default NewSession will only load credentials from the shared credentials +file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is +set to a truthy value the Session will be created from the configuration +values from the shared config (~/.aws/config) and shared credentials +(~/.aws/credentials) files. Using the NewSessionWithOptions with +SharedConfigState set to SharedConfigEnable will create the session as if the +AWS_SDK_LOAD_CONFIG environment variable was set. + +Credential and config loading order + +The Session will attempt to load configuration and credentials from the +environment, configuration files, and other credential sources. 
The order
+configuration is loaded in is:
+
+  * Environment Variables
+  * Shared Credentials file
+  * Shared Configuration file (if SharedConfig is enabled)
+  * EC2 Instance Metadata (credentials only)
+
+The environment variables for credentials will have precedence over shared
+config even if SharedConfig is enabled. To override this behavior, and use
+shared config credentials instead, specify the session.Options.Profile (e.g.
+when using credential_source=Environment to assume a role).
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		Profile: "myProfile",
+	})
+
+Creating Sessions
+
+Creating a Session without additional options will load the credentials,
+region, and profile from the environment and shared config automatically. See
+the "Environment Variables" section for information on the environment
+variables used by Session.
+
+	// Create Session
+	sess, err := session.NewSession()
+
+
+When creating Sessions optional aws.Config values can be passed in that will
+override the default, or loaded, config values the Session is being created
+with. This allows you to provide additional, or case based, configuration
+as needed.
+
+	// Create a Session with a custom region
+	sess, err := session.NewSession(&aws.Config{
+		Region: aws.String("us-west-2"),
+	})
+
+Use NewSessionWithOptions to provide additional configuration driving how the
+Session's configuration will be loaded. Such as specifying the shared config
+profile, or overriding the shared config state (AWS_SDK_LOAD_CONFIG).
+
+	// Equivalent to session.NewSession()
+	sess, err := session.NewSessionWithOptions(session.Options{
+		// Options
+	})
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		// Specify profile to load for the session's config
+		Profile: "profile_name",
+
+		// Provide SDK Config options, such as Region.
+		Config: aws.Config{
+			Region: aws.String("us-west-2"),
+		},
+
+		// Force enable Shared Config support
+		SharedConfigState: session.SharedConfigEnable,
+	})
+
+Adding Handlers
+
+You can add handlers to a session to decorate API operations (e.g. adding HTTP
+headers). All clients that use the Session receive a copy of the Session's
+handlers. For example, the following request handler added to the Session logs
+every request made.
+
+	// Create a session, and add additional handlers for all service
+	// clients created with the Session to inherit. Adds logging handler.
+	sess := session.Must(session.NewSession())
+
+	sess.Handlers.Send.PushFront(func(r *request.Request) {
+		// Log every request made and its payload
+		logger.Printf("Request: %s/%s, Params: %s",
+			r.ClientInfo.ServiceName, r.Operation, r.Params)
+	})
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's
+(~/.aws/credentials) credentials values, and all other config is provided by
+the environment variables, SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
+option is used to create the Session the full shared config values will be
+loaded. This includes credentials, region, and support for assume role. In
+addition the Session will load its configuration from both the shared config
+file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
+files have the same format.
+
+If both config files are present the configuration from both files will be
+read. 
The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config file
+(~/.aws/config).
+
+Credentials are the values the SDK uses to authenticate requests with AWS
+Services. When specified in a file, both aws_access_key_id and
+aws_secret_access_key must be provided together in the same file to be
+considered valid. They will be ignored if both are not present.
+aws_session_token is an optional field that can be provided in addition to the
+other two fields.
+
+	aws_access_key_id = AKID
+	aws_secret_access_key = SECRET
+	aws_session_token = TOKEN
+
+	; region only supported if SharedConfigEnabled.
+	region = us-east-1
+
+Assume Role configuration
+
+The role_arn field allows you to configure the SDK to assume an IAM role using
+a set of credentials from another source. Such as when paired with static
+credentials, "source_profile", "credential_process", or "credential_source"
+fields. If "role_arn" is provided, a source of credentials must also be
+specified, such as "source_profile", "credential_source", or
+"credential_process".
+
+	role_arn = arn:aws:iam:::role/
+	source_profile = profile_with_creds
+	external_id = 1234
+	mfa_serial = 
+	role_session_name = session_name
+
+
+The SDK supports assuming a role with an MFA token. If "mfa_serial" is set,
+you must also set the Session Option.AssumeRoleTokenProvider. The Session will
+fail to load if the AssumeRoleTokenProvider is not specified.
+
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+	}))
+
+To set up Assume Role outside of a session see the stscreds.AssumeRoleProvider
+documentation.
+
+Environment Variables
+
+When a Session is created several environment variables can be set to adjust
+how the SDK functions, and what configuration data it loads when creating
+Sessions. All environment values are optional, but some values like credentials
+require multiple of the values to be set or the partial values will be ignored.
+All environment variable values are strings unless otherwise noted.
+
+Environment configuration values. If set, both Access Key ID and Secret Access
+Key must be provided. Session Token can optionally also be provided, but is
+not required.
+
+	# Access Key ID
+	AWS_ACCESS_KEY_ID=AKID
+	AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+
+	# Secret Access Key
+	AWS_SECRET_ACCESS_KEY=SECRET
+	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+
+	# Session Token
+	AWS_SESSION_TOKEN=TOKEN
+
+Region value will instruct the SDK where to make service API requests to. If
+it is not provided in the environment the region must be provided before a
+service client request is made.
+
+	AWS_REGION=us-east-1
+
+	# AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+	# and AWS_REGION is not also set.
+	AWS_DEFAULT_REGION=us-east-1
+
+Profile name the SDK should use when loading shared config from the
+configuration files. If not provided "default" will be used as the profile
+name.
+
+	AWS_PROFILE=my_profile
+
+	# AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+	# and AWS_PROFILE is not also set.
+	AWS_DEFAULT_PROFILE=my_profile
+
+SDK load config instructs the SDK to load the shared config in addition to
+shared credentials. This also expands the configuration loaded so the shared
+credentials will have parity with the shared config file. 
This also enables
+Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+env values as well.
+
+	AWS_SDK_LOAD_CONFIG=1
+
+Custom Shared Config and Credential Files
+
+Shared credentials file path can be set to instruct the SDK to use an alternative
+file for the shared credentials. If not set the file will be loaded from
+$HOME/.aws/credentials on Linux/Unix based systems, and
+%USERPROFILE%\.aws\credentials on Windows.
+
+	AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+
+Shared config file path can be set to instruct the SDK to use an alternative
+file for the shared config. If not set the file will be loaded from
+$HOME/.aws/config on Linux/Unix based systems, and
+%USERPROFILE%\.aws\config on Windows.
+
+	AWS_CONFIG_FILE=$HOME/my_shared_config
+
+Custom CA Bundle
+
+Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
+will use instead of the default system's root CA bundle. Use this only
+if you want to replace the CA bundle the SDK uses for TLS requests.
+
+	AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+
+Enabling this option will attempt to merge the Transport into the SDK's HTTP
+client. If the client's Transport is not a http.Transport an error will be
+returned. If the Transport's TLS config is set this option will cause the SDK
+to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
+contains multiple certificates all of them will be loaded.
+
+The Session option CustomCABundle is also available when creating sessions
+to also enable this feature. CustomCABundle session option field has priority
+over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+
+Setting a custom HTTPClient in the aws.Config options will override this setting.
+To use this option and a custom HTTP client, the HTTP client needs to be
+provided when creating the session, not the service client.
+
+Custom Client TLS Certificate
+
+The SDK supports the environment and session option being configured with
+Client TLS certificates that are sent as a part of the client's TLS handshake
+for client authentication. If used, both Cert and Key values are required. If
+one is missing, or either fails to load, an error will be returned.
+
+HTTP Client's Transport concrete implementation must be a http.Transport
+or creating the session will fail.
+
+	AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+
+This can also be configured via the session.Options ClientTLSCert and ClientTLSKey.
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		ClientTLSCert: myCertFile,
+		ClientTLSKey: myKeyFile,
+	})
+
+Custom EC2 IMDS Endpoint
+
+The endpoint of the EC2 IMDS client can be configured via the environment
+variable AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
+Session. See Options.EC2IMDSEndpoint for more details.
+
+	AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
+
+If using a URL with an IPv6 address literal, the IPv6 address
+component must be enclosed in square brackets.
+
+	AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+
+The custom EC2 IMDS endpoint can also be specified via the Session options.
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		EC2MetadataEndpoint: "http://[::1]",
+	})
+
+FIPS and DualStack Endpoints
+
+The SDK can be configured to resolve an endpoint with certain capabilities such as FIPS and DualStack.
+
+You can configure a FIPS endpoint using an environment variable, shared config ($HOME/.aws/config),
+or programmatically.
+
+To configure a FIPS endpoint, set the environment variable AWS_USE_FIPS_ENDPOINT to true or false to enable
+or disable FIPS endpoint resolution.
+
+	AWS_USE_FIPS_ENDPOINT=true
+
+To configure a FIPS endpoint using shared config, set use_fips_endpoint to true or false to enable
+or disable FIPS endpoint resolution.
+
+	[profile myprofile]
+	region=us-west-2
+	use_fips_endpoint=true
+
+To configure a FIPS endpoint programmatically
+
+	// Option 1: Configure it on a session for all clients
+	sess, err := session.NewSessionWithOptions(session.Options{
+		UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled,
+	})
+	if err != nil {
+		// handle error
+	}
+
+	client := s3.New(sess)
+
+	// Option 2: Configure it per client
+	sess, err := session.NewSession()
+	if err != nil {
+		// handle error
+	}
+
+	client := s3.New(sess, &aws.Config{
+		UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled,
+	})
+
+You can configure a DualStack endpoint using an environment variable, shared config ($HOME/.aws/config),
+or programmatically.
+
+To configure a DualStack endpoint, set the environment variable AWS_USE_DUALSTACK_ENDPOINT to true or false to
+enable or disable DualStack endpoint resolution.
+
+	AWS_USE_DUALSTACK_ENDPOINT=true
+
+To configure a DualStack endpoint using shared config, set use_dualstack_endpoint to true or false to enable
+or disable DualStack endpoint resolution.
+
+	[profile myprofile]
+	region=us-west-2
+	use_dualstack_endpoint=true
+
+To configure a DualStack endpoint programmatically
+
+	// Option 1: Configure it on a session for all clients
+	sess, err := session.NewSessionWithOptions(session.Options{
+		UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled,
+	})
+	if err != nil {
+		// handle error
+	}
+
+	client := s3.New(sess)
+
+	// Option 2: Configure it per client
+	sess, err := session.NewSession()
+	if err != nil {
+		// handle error
+	}
+
+	client := s3.New(sess, &aws.Config{
+		UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled,
+	})
+*/
+package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
new file mode 100644
index 000000000..93bb5de64
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -0,0 +1,499 @@
+package session
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+)
+
+// EnvProviderName provides a name of the provider when config is loaded from environment.
+const EnvProviderName = "EnvConfigCredentials"
+
+// envConfig is a collection of environment values the SDK will read
+// setup config from. All environment values are optional, but some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type envConfig struct {
+	// Environment configuration values. If set, both Access Key ID and Secret
+	// Access Key must be provided. Session Token can optionally also be
+	// provided, but is not required.
+	//
+	// # Access Key ID
+	// AWS_ACCESS_KEY_ID=AKID
+	// AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+	//
+	// # Secret Access Key
+	// AWS_SECRET_ACCESS_KEY=SECRET
+	// AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+	//
+	// # Session Token
+	// AWS_SESSION_TOKEN=TOKEN
+	Creds credentials.Value
+
+	// Region value will instruct the SDK where to make service API requests
+	// to. If it is not provided in the environment the region must be
+	// provided before a service client request is made.
+	//
+	// AWS_REGION=us-east-1
+	//
+	// # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+	// # and AWS_REGION is not also set.
+	// AWS_DEFAULT_REGION=us-east-1
+	Region string
+
+	// Profile name the SDK should use when loading shared configuration from
+	// the shared configuration files. If not provided "default" will be used
+	// as the profile name.
+	//
+	// AWS_PROFILE=my_profile
+	//
+	// # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+	// # and AWS_PROFILE is not also set.
+	// AWS_DEFAULT_PROFILE=my_profile
+	Profile string
+
+	// SDK load config instructs the SDK to load the shared config in addition to
+	// shared credentials. This also expands the configuration loaded from the shared
+	// credentials to have parity with the shared config file. This also enables
+	// Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+	// env values as well.
+	//
+	// AWS_SDK_LOAD_CONFIG=1
+	EnableSharedConfig bool
+
+	// Shared credentials file path can be set to instruct the SDK to use an alternate
+	// file for the shared credentials. If not set the file will be loaded from
+	// $HOME/.aws/credentials on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\credentials on Windows.
+	//
+	// AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+	SharedCredentialsFile string
+
+	// Shared config file path can be set to instruct the SDK to use an alternate
+	// file for the shared config. If not set the file will be loaded from
+	// $HOME/.aws/config on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\config on Windows.
+	//
+	// AWS_CONFIG_FILE=$HOME/my_shared_config
+	SharedConfigFile string
+
+	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+	// that the SDK will use instead of the system's root CA bundle.
+	// Only use this if you want to configure the SDK to use a custom set
+	// of CAs.
+	//
+	// Enabling this option will attempt to merge the Transport
+	// into the SDK's HTTP client. If the client's Transport is
+	// not a http.Transport an error will be returned. If the
+	// Transport's TLS config is set this option will cause the
+	// SDK to overwrite the Transport's TLS config's RootCAs value.
+	//
+	// Setting a custom HTTPClient in the aws.Config options will override this setting.
+	// To use this option and a custom HTTP client, the HTTP client needs to be provided
+	// when creating the session, not the service client.
+	//
+	// AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+	CustomCABundle string
+
+	// Sets the TLS client certificate that should be used by the SDK's HTTP transport
+	// when making requests. The certificate must be paired with a TLS client key file.
+	//
+	// AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+	ClientTLSCert string
+
+	// Sets the TLS client key that should be used by the SDK's HTTP transport
+	// when making requests. The key must be paired with a TLS client certificate file.
+	//
+	// AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	ClientTLSKey string
+
+	csmEnabled  string
+	CSMEnabled  *bool
+	CSMPort     string
+	CSMHost     string
+	CSMClientID string
+
+	// Enables endpoint discovery via environment variables.
+	//
+	// AWS_ENABLE_ENDPOINT_DISCOVERY=true
+	EnableEndpointDiscovery *bool
+	enableEndpointDiscovery string
+
+	// Specifies the WebIdentity token the SDK should use to assume a role
+	// with.
+	//
+	// AWS_WEB_IDENTITY_TOKEN_FILE=file_path
+	WebIdentityTokenFilePath string
+
+	// Specifies the IAM role ARN to use when assuming a role.
+	//
+	// AWS_ROLE_ARN=role_arn
+	RoleARN string
+
+	// Specifies the IAM role session name to use when assuming a role.
+	//
+	// AWS_ROLE_SESSION_NAME=session_name
+	RoleSessionName string
+
+	// Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint
+	// for a service.
+	//
+	// AWS_STS_REGIONAL_ENDPOINTS=regional
+	// This can take the values `regional` or `legacy`.
+	STSRegionalEndpoint endpoints.STSRegionalEndpoint
+
+	// Specifies the S3 Regional Endpoint flag for the SDK to resolve the
+	// endpoint for a service.
+	//
+	// AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional
+	// This can take the values `regional` or `legacy`.
+	S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
+
+	// Specifies if the S3 service should allow ARNs to direct the region
+	// the client's requests are sent to.
+	//
+	// AWS_S3_USE_ARN_REGION=true
+	S3UseARNRegion bool
+
+	// Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode.
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+	EC2IMDSEndpoint string
+
+	// Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
+	EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState
+
+	// Specifies that IMDS clients should not fallback to IMDSv1 if token
+	// requests fail.
+	//
+	// AWS_EC2_METADATA_V1_DISABLED=true
+	EC2IMDSv1Disabled *bool
+
+	// Specifies that SDK clients must resolve a dual-stack endpoint for
+	// services.
+	//
+	// AWS_USE_DUALSTACK_ENDPOINT=true
+	UseDualStackEndpoint endpoints.DualStackEndpointState
+
+	// Specifies that SDK clients must resolve a FIPS endpoint for
+	// services.
+	//
+	// AWS_USE_FIPS_ENDPOINT=true
+	UseFIPSEndpoint endpoints.FIPSEndpointState
+}
+
+var (
+	csmEnabledEnvKey = []string{
+		"AWS_CSM_ENABLED",
+	}
+	csmHostEnvKey = []string{
+		"AWS_CSM_HOST",
+	}
+	csmPortEnvKey = []string{
+		"AWS_CSM_PORT",
+	}
+	csmClientIDEnvKey = []string{
+		"AWS_CSM_CLIENT_ID",
+	}
+	credAccessEnvKey = []string{
+		"AWS_ACCESS_KEY_ID",
+		"AWS_ACCESS_KEY",
+	}
+	credSecretEnvKey = []string{
+		"AWS_SECRET_ACCESS_KEY",
+		"AWS_SECRET_KEY",
+	}
+	credSessionEnvKey = []string{
+		"AWS_SESSION_TOKEN",
+	}
+
+	enableEndpointDiscoveryEnvKey = []string{
+		"AWS_ENABLE_ENDPOINT_DISCOVERY",
+	}
+
+	regionEnvKeys = []string{
+		"AWS_REGION",
+		"AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
+	}
+	profileEnvKeys = []string{
+		"AWS_PROFILE",
+		"AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
+	}
+	sharedCredsFileEnvKey = []string{
+		"AWS_SHARED_CREDENTIALS_FILE",
+	}
+	sharedConfigFileEnvKey = []string{
+		"AWS_CONFIG_FILE",
+	}
+	webIdentityTokenFilePathEnvKey = []string{
+		"AWS_WEB_IDENTITY_TOKEN_FILE",
+	}
+	roleARNEnvKey = []string{
+		"AWS_ROLE_ARN",
+	}
+	roleSessionNameEnvKey = []string{
+		"AWS_ROLE_SESSION_NAME",
+	}
+	stsRegionalEndpointKey = []string{
+		"AWS_STS_REGIONAL_ENDPOINTS",
+	}
+	s3UsEast1RegionalEndpoint = []string{
+		"AWS_S3_US_EAST_1_REGIONAL_ENDPOINT",
+	}
+	s3UseARNRegionEnvKey = []string{
+		"AWS_S3_USE_ARN_REGION",
+	}
+	ec2IMDSEndpointEnvKey = []string{
+		"AWS_EC2_METADATA_SERVICE_ENDPOINT",
+	}
+	ec2IMDSEndpointModeEnvKey = []string{
+		"AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE",
+	}
+	ec2MetadataV1DisabledEnvKey = []string{
+		"AWS_EC2_METADATA_V1_DISABLED",
+	}
+	useCABundleKey = []string{
+		"AWS_CA_BUNDLE",
+	}
+	useClientTLSCert = []string{
+		"AWS_SDK_GO_CLIENT_TLS_CERT",
+	}
+	useClientTLSKey = []string{
+		"AWS_SDK_GO_CLIENT_TLS_KEY",
+	}
+	awsUseDualStackEndpoint = []string{
+		"AWS_USE_DUALSTACK_ENDPOINT",
+	}
+	awsUseFIPSEndpoint = []string{
+		"AWS_USE_FIPS_ENDPOINT",
+	}
+)
+
+// loadEnvConfig retrieves the SDK's environment configuration.
+// See `envConfig` for the values that will be retrieved.
+//
+// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
+// the shared SDK config will be loaded in addition to the SDK's specific
+// configuration values.
+func loadEnvConfig() (envConfig, error) {
+	enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
+	return envConfigLoad(enableSharedConfig)
+}
+
+// loadSharedEnvConfig retrieves the SDK's environment configuration, and the
+// SDK shared config. See `envConfig` for the values that will be retrieved.
+//
+// Loads the shared configuration in addition to the SDK's specific configuration.
+// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
+// environment variable is set.
+func loadSharedEnvConfig() (envConfig, error) { + return envConfigLoad(true) +} + +func envConfigLoad(enableSharedConfig bool) (envConfig, error) { + cfg := envConfig{} + + cfg.EnableSharedConfig = enableSharedConfig + + // Static environment credentials + var creds credentials.Value + setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&creds.SessionToken, credSessionEnvKey) + if creds.HasKeys() { + // Require logical grouping of credentials + creds.ProviderName = EnvProviderName + cfg.Creds = creds + } + + // Role Metadata + setFromEnvVal(&cfg.RoleARN, roleARNEnvKey) + setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey) + + // Web identity environment variables + setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey) + + // CSM environment variables + setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) + setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) + setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) + + if len(cfg.csmEnabled) != 0 { + v, _ := strconv.ParseBool(cfg.csmEnabled) + cfg.CSMEnabled = &v + } + + regionKeys := regionEnvKeys + profileKeys := profileEnvKeys + if !cfg.EnableSharedConfig { + regionKeys = regionKeys[:1] + profileKeys = profileKeys[:1] + } + + setFromEnvVal(&cfg.Region, regionKeys) + setFromEnvVal(&cfg.Profile, profileKeys) + + // endpoint discovery is in reference to it being enabled. + setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) + if len(cfg.enableEndpointDiscovery) > 0 { + cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") + } + + setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) + setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + + if len(cfg.SharedCredentialsFile) == 0 { + cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(cfg.SharedConfigFile) == 0 { + cfg.SharedConfigFile = defaults.SharedConfigFilename() + } + + setFromEnvVal(&cfg.CustomCABundle, useCABundleKey) + setFromEnvVal(&cfg.ClientTLSCert, useClientTLSCert) + setFromEnvVal(&cfg.ClientTLSKey, useClientTLSKey) + + var err error + // STS Regional Endpoint variable + for _, k := range stsRegionalEndpointKey { + if v := os.Getenv(k); len(v) != 0 { + cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + // S3 Regional Endpoint variable + for _, k := range s3UsEast1RegionalEndpoint { + if v := os.Getenv(k); len(v) != 0 { + cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + var s3UseARNRegion string + setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey) + if len(s3UseARNRegion) != 0 { + switch { + case strings.EqualFold(s3UseARNRegion, "false"): + cfg.S3UseARNRegion = false + case strings.EqualFold(s3UseARNRegion, "true"): + cfg.S3UseARNRegion = true + default: + return envConfig{}, fmt.Errorf( + "invalid value for environment variable, %s=%s, need true or false", + s3UseARNRegionEnvKey[0], s3UseARNRegion) + } + } + + setFromEnvVal(&cfg.EC2IMDSEndpoint, ec2IMDSEndpointEnvKey) + if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, ec2IMDSEndpointModeEnvKey); err != nil { + return envConfig{}, err + } + setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, ec2MetadataV1DisabledEnvKey) + + if err := 
setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, awsUseDualStackEndpoint); err != nil { + return cfg, err + } + + if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, awsUseFIPSEndpoint); err != nil { + return cfg, err + } + + return cfg, nil +} + +func setFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) != 0 { + *dst = v + break + } + } +} + +func setBoolPtrFromEnvVal(dst **bool, keys []string) { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + switch { + case strings.EqualFold(value, "false"): + *dst = new(bool) + **dst = false + case strings.EqualFold(value, "true"): + *dst = new(bool) + **dst = true + } + } +} + +func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + if err := mode.SetFromString(value); err != nil { + return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err) + } + return nil + } + return nil +} + +func setUseDualStackEndpointFromEnvVal(dst *endpoints.DualStackEndpointState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, "true"): + *dst = endpoints.DualStackEndpointStateEnabled + case strings.EqualFold(value, "false"): + *dst = endpoints.DualStackEndpointStateDisabled + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false", + k, value) + } + } + return nil +} + +func setUseFIPSEndpointFromEnvVal(dst *endpoints.FIPSEndpointState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, "true"): + *dst = endpoints.FIPSEndpointStateEnabled + case strings.EqualFold(value, "false"): + *dst = endpoints.FIPSEndpointStateDisabled + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false", + k, value) + } + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 000000000..3c88dee52 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,1005 @@ +package session + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/csm" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // ErrCodeSharedConfig represents an error that occurs in the shared + // configuration logic + ErrCodeSharedConfig = "SharedConfigErr" + + // ErrCodeLoadCustomCABundle error code for unable to load custom CA bundle. 
+	ErrCodeLoadCustomCABundle = "LoadCustomCABundleError"
+
+	// ErrCodeLoadClientTLSCert error code for unable to load client TLS
+	// certificate or key
+	ErrCodeLoadClientTLSCert = "LoadClientTLSCertError"
+)
+
+// ErrSharedConfigSourceCollision will be returned if a section contains both
+// source_profile and credential_source
+var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token", nil)
+
+// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
+// variables are empty and Environment was set as the credential source
+var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil)

+// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided
+var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// Sessions are safe to create service clients concurrently, but it is not safe
+// to mutate the Session concurrently.
+//
+// The Session satisfies the service client's client.ConfigProvider.
+type Session struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+
+	options Options
+}
+
+// New creates a new instance of the handlers merging in the provided configs
+// on top of the SDK's default configurations. Once the Session is created it
+// can be mutated to modify the Config or Handlers. The Session is safe to be
+// read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value,
+// the New method could encounter an error when loading the configuration.
+// When the environment variable is set, and an error occurs, New will return a
+// session that will fail all requests reporting the error that occurred while
+// loading the session. Use NewSession to get the error when creating the
+// session.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded, in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file.
+//
+// Deprecated: Use NewSession functions to create sessions instead. NewSession
+// has the same functionality as New except an error can be returned when the
+// func is called instead of waiting to receive an error until a request is made.
+func New(cfgs ...*aws.Config) *Session {
+	// load initial config from environment
+	envCfg, envErr := loadEnvConfig()
+
+	if envCfg.EnableSharedConfig {
+		var cfg aws.Config
+		cfg.MergeIn(cfgs...)
+		s, err := NewSessionWithOptions(Options{
+			Config:            cfg,
+			SharedConfigState: SharedConfigEnable,
+		})
+		if err != nil {
+			// Old session.New expected all errors to be discovered when
+			// a request is made, and would report the errors then. This
+			// needs to be replicated if an error occurs while creating
+			// the session.
+			msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
+				"Use session.NewSession to handle errors occurring during session creation."
+
+			// Session creation failed, need to report the error and prevent
+			// any requests from succeeding.
+			s = &Session{Config: defaults.Config()}
+			s.logDeprecatedNewSessionError(msg, err, cfgs)
+		}
+
+		return s
+	}
+
+	s := deprecatedNewSession(envCfg, cfgs...)
+	if envErr != nil {
+		msg := "failed to load env config"
+		s.logDeprecatedNewSessionError(msg, envErr, cfgs)
+	}
+
+	if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil {
+		if l := s.Config.Logger; l != nil {
+			l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
+		}
+	} else if csmCfg.Enabled {
+		err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
+		if err != nil {
+			msg := "failed to enable CSM"
+			s.logDeprecatedNewSessionError(msg, err, cfgs)
+		}
+	}
+
+	return s
+}
+
+// NewSession returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. Once the Session is created
+// it can be mutated to modify the Config or Handlers. The Session is safe to
+// be read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+// See the NewSessionWithOptions func for information on how to override or
+// control through code how the Session will be created, such as specifying the
+// config profile, and controlling if shared config is enabled or not.
+func NewSession(cfgs ...*aws.Config) (*Session, error) {
+	opts := Options{}
+	opts.Config.MergeIn(cfgs...)
+
+	return NewSessionWithOptions(opts)
+}
+
+// SharedConfigState provides the ability to optionally override the state
+// of the session's creation based on the shared config being enabled or
+// disabled.
+type SharedConfigState int
+
+const (
+	// SharedConfigStateFromEnv does not override any state of the
+	// AWS_SDK_LOAD_CONFIG env var. It is the default value of the
+	// SharedConfigState type.
+	SharedConfigStateFromEnv SharedConfigState = iota
+
+	// SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
+	// and disables the shared config functionality.
+	SharedConfigDisable
+
+	// SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
+	// and enables the shared config functionality.
+	SharedConfigEnable
+)
+
+// Options provides the means to control how a Session is created and what
+// configuration values will be loaded.
+type Options struct {
+	// Provides config values for the SDK to use when creating service clients
+	// and making API requests to services. Any value set in this field
+	// will override the associated value provided by the SDK defaults,
+	// environment or config files where relevant.
+	//
+	// If not set, configuration values from SDK defaults, environment, and
+	// config files will be used.
+	Config aws.Config
+
+	// Overrides the config profile the Session should be created from. If not
+	// set the value of the environment variable will be loaded (AWS_PROFILE,
+	// or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
+	//
+	// If not set and environment variables are not set the "default"
+	// (DefaultSharedConfigProfile) will be used as the profile to load the
+	// session config from.
+	Profile string
+
+	// Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
+	// environment variable. By default a Session will be created using the
+	// value provided by the AWS_SDK_LOAD_CONFIG environment variable.
+	//
+	// Setting this value to SharedConfigEnable or SharedConfigDisable
+	// will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
+	// and enable or disable the shared config functionality.
+	SharedConfigState SharedConfigState
+
+	// Ordered list of files the session will load configuration from.
+	// It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE.
+	SharedConfigFiles []string
+
+	// When the SDK's shared config is configured to assume a role with MFA
+	// this option is required in order to provide the mechanism that will
+	// retrieve the MFA token. There is no default value for this field. If
+	// it is not set an error will be returned when creating the session.
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed. Within the context of service clients
+	// all sharing the same session the SDK will ensure calls to the token
+	// provider are atomic. When sharing a token provider across multiple
+	// sessions additional synchronization logic is needed to ensure the
+	// token providers do not introduce race conditions. It is recommended to
+	// share the session where possible.
+	//
+	// stscreds.StdinTokenProvider is a basic implementation that will prompt
+	// from stdin for the MFA token code.
+	//
+	// This field is only used if the shared configuration is enabled, and
+	// the config enables assume role with MFA via the mfa_serial field.
+	AssumeRoleTokenProvider func() (string, error)
+
+	// When the SDK's shared config is configured to assume a role this option
+	// may be provided to set the expiry duration of the STS credentials.
+	// Defaults to 15 minutes if not set as documented in the
+	// stscreds.AssumeRoleProvider.
+	AssumeRoleDuration time.Duration
+
+	// Reader for a custom Certificate Authority (CA) bundle in PEM format that
+	// the SDK will use instead of the default system's root CA bundle. Use this
+	// only if you want to replace the CA bundle the SDK uses for TLS requests.
+	//
+	// HTTP Client's Transport concrete implementation must be a http.Transport
+	// or creating the session will fail.
+	//
+	// If the Transport's TLS config is set this option will cause the SDK
+	// to overwrite the Transport's TLS config's RootCAs value. If the CA
+	// bundle reader contains multiple certificates all of them will be loaded.
+	//
+	// Can also be specified via the environment variable:
+	//
+	// AWS_CA_BUNDLE=$HOME/ca_bundle
+	//
+	// Can also be specified via the shared config field:
+	//
+	// ca_bundle = $HOME/ca_bundle
+	CustomCABundle io.Reader
+
+	// Reader for the TLS client certificate that should be used by the SDK's
+	// HTTP transport when making requests. The certificate must be paired with
+	// a TLS client key file. Will be ignored if both are not provided.
+	//
+	// HTTP Client's Transport concrete implementation must be a http.Transport
+	// or creating the session will fail.
+    //
+    // Can also be specified via the environment variable:
+    //
+    //    AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+    ClientTLSCert io.Reader
+
+    // Reader for the TLS client key that should be used by the SDK's HTTP
+    // transport when making requests. The key must be paired with a TLS client
+    // certificate file. Will be ignored if both are not provided.
+    //
+    // HTTP Client's Transport concrete implementation must be a http.Transport
+    // or creating the session will fail.
+    //
+    // Can also be specified via the environment variable:
+    //
+    //    AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+    ClientTLSKey io.Reader
+
+    // The handlers that the session and all API clients will be created with.
+    // This must be a complete set of handlers. Use the defaults.Handlers()
+    // function to initialize this value before changing the handlers to be
+    // used by the SDK.
+    Handlers request.Handlers
+
+    // Allows specifying a custom endpoint to be used by the EC2 IMDS client
+    // when making requests to the EC2 IMDS API. The endpoint value should
+    // include the URI scheme. If the scheme is not present it will be defaulted to http.
+    //
+    // If unset, the EC2 IMDS client will use its default endpoint.
+    //
+    // Can also be specified via the environment variable,
+    // AWS_EC2_METADATA_SERVICE_ENDPOINT.
+    //
+    //    AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
+    //
+    // If using a URL with an IPv6 address literal, the IPv6 address
+    // component must be enclosed in square brackets.
+    //
+    //    AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+    EC2IMDSEndpoint string
+
+    // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
+    //
+    //    AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
+    EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState
+
+    // Specifies options for creating credential providers.
+    // These are only used if the aws.Config does not already
+    // include credentials.
+    CredentialsProviderOptions *CredentialsProviderOptions
+}
+
+// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. This func uses the Options
+// values to configure how the Session is created.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
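+// For example, assuming a shared config profile that requires MFA, a token
+// provider can be supplied through Options (stscreds here refers to the
+// github.com/aws/aws-sdk-go/aws/credentials/stscreds package):
+//
+//    sess := session.Must(session.NewSessionWithOptions(session.Options{
+//        SharedConfigState:       session.SharedConfigEnable,
+//        AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+//    }))
+//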
+// +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) +// +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) +// +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) +func NewSessionWithOptions(opts Options) (*Session, error) { + var envCfg envConfig + var err error + if opts.SharedConfigState == SharedConfigEnable { + envCfg, err = loadSharedEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load shared config, %v", err) + } + } else { + envCfg, err = loadEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load environment config, %v", err) + } + } + + if len(opts.Profile) != 0 { + envCfg.Profile = opts.Profile + } + + switch opts.SharedConfigState { + case SharedConfigDisable: + envCfg.EnableSharedConfig = false + case SharedConfigEnable: + envCfg.EnableSharedConfig = true + } + + return newSession(opts, envCfg, &opts.Config) +} + +// Must is a helper function to ensure the Session is valid and there was no +// error when calling a NewSession function. +// +// This helper is intended to be used in variable initialization to load the +// Session and configuration at startup. Such as: +// +// var sess = session.Must(session.NewSession()) +func Must(sess *Session, err error) *Session { + if err != nil { + panic(err) + } + + return sess +} + +// Wraps the endpoint resolver with a resolver that will return a custom +// endpoint for EC2 IMDS. +func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string, mode endpoints.EC2IMDSEndpointModeState) endpoints.Resolver { + return endpoints.ResolverFunc( + func(service, region string, opts ...func(*endpoints.Options)) ( + endpoints.ResolvedEndpoint, error, + ) { + if service == ec2MetadataServiceID && len(endpoint) > 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoint, + SigningName: ec2MetadataServiceID, + SigningRegion: region, + }, nil + } else if service == ec2MetadataServiceID { + opts = append(opts, func(o *endpoints.Options) { + o.EC2MetadataEndpointMode = mode + }) + } + return resolver.EndpointFor(service, region, opts...) + }) +} + +func deprecatedNewSession(envCfg envConfig, cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + if cfg.EndpointResolver == nil { + // An endpoint resolver is required for a session to be able to provide + // endpoints for service client configurations. + cfg.EndpointResolver = endpoints.DefaultResolver() + } + + if !(len(envCfg.EC2IMDSEndpoint) == 0 && envCfg.EC2IMDSEndpointMode == endpoints.EC2IMDSEndpointModeStateUnset) { + cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint, envCfg.EC2IMDSEndpointMode) + } + + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) 
+
+    s := &Session{
+        Config:   cfg,
+        Handlers: handlers,
+        options: Options{
+            EC2IMDSEndpoint: envCfg.EC2IMDSEndpoint,
+        },
+    }
+
+    initHandlers(s)
+    return s
+}
+
+func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error {
+    if logger != nil {
+        logger.Log("Enabling CSM")
+    }
+
+    r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port))
+    if err != nil {
+        return err
+    }
+    r.InjectHandlers(handlers)
+
+    return nil
+}
+
+func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
+    cfg := defaults.Config()
+
+    handlers := opts.Handlers
+    if handlers.IsEmpty() {
+        handlers = defaults.Handlers()
+    }
+
+    // Get a merged version of the user provided config to determine if
+    // credentials were set.
+    userCfg := &aws.Config{}
+    userCfg.MergeIn(cfgs...)
+    cfg.MergeIn(userCfg)
+
+    // Ordered config files will be loaded in with later files overwriting
+    // previous config file values.
+    var cfgFiles []string
+    if opts.SharedConfigFiles != nil {
+        cfgFiles = opts.SharedConfigFiles
+    } else {
+        cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
+        if !envCfg.EnableSharedConfig {
+            // The shared config file (~/.aws/config) is only loaded if instructed
+            // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
+            cfgFiles = cfgFiles[1:]
+        }
+    }
+
+    // Load additional config from file(s)
+    sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig)
+    if err != nil {
+        if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) {
+            // Special case where the user has not explicitly specified an AWS_PROFILE,
+            // or session.Options.profile, shared config is not enabled, and the
+            // environment has credentials, allow the shared config file to fail to
+            // load since the user has already provided credentials, and nothing else
+            // is required to be read from file. Github(aws/aws-sdk-go#2455)
+        } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
+            return nil, err
+        }
+    }
+
+    if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
+        return nil, err
+    }
+
+    if err := setTLSOptions(&opts, cfg, envCfg, sharedCfg); err != nil {
+        return nil, err
+    }
+
+    s := &Session{
+        Config:   cfg,
+        Handlers: handlers,
+        options:  opts,
+    }
+
+    initHandlers(s)
+
+    if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil {
+        if l := s.Config.Logger; l != nil {
+            l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
+        }
+    } else if csmCfg.Enabled {
+        err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    return s, nil
+}
+
+type csmConfig struct {
+    Enabled  bool
+    Host     string
+    Port     string
+    ClientID string
+}
+
+var csmProfileName = "aws_csm"
+
+func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) {
+    if envCfg.CSMEnabled != nil {
+        if *envCfg.CSMEnabled {
+            return csmConfig{
+                Enabled:  true,
+                ClientID: envCfg.CSMClientID,
+                Host:     envCfg.CSMHost,
+                Port:     envCfg.CSMPort,
+            }, nil
+        }
+        return csmConfig{}, nil
+    }
+
+    sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false)
+    if err != nil {
+        if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
+            return csmConfig{}, err
+        }
+    }
+    if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled {
+        return csmConfig{
+            Enabled:  true,
+            ClientID: sharedCfg.CSMClientID,
+            Host:     sharedCfg.CSMHost,
+            Port:     sharedCfg.CSMPort,
+        }, nil
+    }
+
+    return csmConfig{}, nil
+}
+
+func setTLSOptions(opts *Options, cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error {
+    // The CA bundle can be specified in both the environment variable and the
+    // shared config file.
+    var caBundleFilename = envCfg.CustomCABundle
+    if len(caBundleFilename) == 0 {
+        caBundleFilename = sharedCfg.CustomCABundle
+    }
+
+    // Only use environment value if session option is not provided.
+    customTLSOptions := map[string]struct {
+        filename string
+        field    *io.Reader
+        errCode  string
+    }{
+        "custom CA bundle PEM":   {filename: caBundleFilename, field: &opts.CustomCABundle, errCode: ErrCodeLoadCustomCABundle},
+        "custom client TLS cert": {filename: envCfg.ClientTLSCert, field: &opts.ClientTLSCert, errCode: ErrCodeLoadClientTLSCert},
+        "custom client TLS key":  {filename: envCfg.ClientTLSKey, field: &opts.ClientTLSKey, errCode: ErrCodeLoadClientTLSCert},
+    }
+    for name, v := range customTLSOptions {
+        if len(v.filename) != 0 && *v.field == nil {
+            f, err := os.Open(v.filename)
+            if err != nil {
+                return awserr.New(v.errCode, fmt.Sprintf("failed to open %s file", name), err)
+            }
+            defer f.Close()
+            *v.field = f
+        }
+    }
+
+    // Setup HTTP client with custom cert bundle if enabled
+    if opts.CustomCABundle != nil {
+        if err := loadCustomCABundle(cfg.HTTPClient, opts.CustomCABundle); err != nil {
+            return err
+        }
+    }
+
+    // Setup HTTP client TLS certificate and key for client TLS authentication.
+    if opts.ClientTLSCert != nil && opts.ClientTLSKey != nil {
+        if err := loadClientTLSCert(cfg.HTTPClient, opts.ClientTLSCert, opts.ClientTLSKey); err != nil {
+            return err
+        }
+    } else if opts.ClientTLSCert == nil && opts.ClientTLSKey == nil {
+        // Do nothing if neither value is available.
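+        // Supplying only one of the cert/key pair falls through to the
+        // error below, since a TLS client certificate requires both parts.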
+
+    } else {
+        return awserr.New(ErrCodeLoadClientTLSCert,
+            fmt.Sprintf("client TLS cert(%t) and key(%t) must both be provided",
+                opts.ClientTLSCert != nil, opts.ClientTLSKey != nil), nil)
+    }
+
+    return nil
+}
+
+func getHTTPTransport(client *http.Client) (*http.Transport, error) {
+    var t *http.Transport
+    switch v := client.Transport.(type) {
+    case *http.Transport:
+        t = v
+    default:
+        if client.Transport != nil {
+            return nil, fmt.Errorf("unsupported transport, %T", client.Transport)
+        }
+    }
+    if t == nil {
+        // A nil transport implies `http.DefaultTransport` should be used. Since
+        // the SDK cannot modify nor copy the `DefaultTransport`, a new transport
+        // with the next closest default values is used instead.
+        t = getCustomTransport()
+    }
+
+    return t, nil
+}
+
+func loadCustomCABundle(client *http.Client, bundle io.Reader) error {
+    t, err := getHTTPTransport(client)
+    if err != nil {
+        return awserr.New(ErrCodeLoadCustomCABundle,
+            "unable to load custom CA bundle, HTTPClient's transport unsupported type", err)
+    }
+
+    p, err := loadCertPool(bundle)
+    if err != nil {
+        return err
+    }
+    if t.TLSClientConfig == nil {
+        t.TLSClientConfig = &tls.Config{}
+    }
+    t.TLSClientConfig.RootCAs = p
+
+    client.Transport = t
+
+    return nil
+}
+
+func loadCertPool(r io.Reader) (*x509.CertPool, error) {
+    b, err := ioutil.ReadAll(r)
+    if err != nil {
+        return nil, awserr.New(ErrCodeLoadCustomCABundle,
+            "failed to read custom CA bundle PEM file", err)
+    }
+
+    p := x509.NewCertPool()
+    if !p.AppendCertsFromPEM(b) {
+        return nil, awserr.New(ErrCodeLoadCustomCABundle,
+            "failed to load custom CA bundle PEM file", err)
+    }
+
+    return p, nil
+}
+
+func loadClientTLSCert(client *http.Client, certFile, keyFile io.Reader) error {
+    t, err := getHTTPTransport(client)
+    if err != nil {
+        return awserr.New(ErrCodeLoadClientTLSCert,
+            "unable to get usable HTTP transport from client", err)
+    }
+
+    cert, err := ioutil.ReadAll(certFile)
+    if err != nil {
+        return awserr.New(ErrCodeLoadClientTLSCert,
+            "unable to read client TLS cert file", err)
+    }
+
+    key, err := ioutil.ReadAll(keyFile)
+    if err != nil {
+        return awserr.New(ErrCodeLoadClientTLSCert,
+            "unable to read client TLS key file", err)
+    }
+
+    clientCert, err := tls.X509KeyPair(cert, key)
+    if err != nil {
+        return awserr.New(ErrCodeLoadClientTLSCert,
+            "unable to load x509 key pair from client cert", err)
+    }
+
+    tlsCfg := t.TLSClientConfig
+    if tlsCfg == nil {
+        tlsCfg = &tls.Config{}
+    }
+
+    tlsCfg.Certificates = append(tlsCfg.Certificates, clientCert)
+
+    t.TLSClientConfig = tlsCfg
+    client.Transport = t
+
+    return nil
+}
+
+func mergeConfigSrcs(cfg, userCfg *aws.Config,
+    envCfg envConfig, sharedCfg sharedConfig,
+    handlers request.Handlers,
+    sessOpts Options,
+) error {
+
+    // Region if not already set by user
+    if len(aws.StringValue(cfg.Region)) == 0 {
+        if len(envCfg.Region) > 0 {
+            cfg.WithRegion(envCfg.Region)
+        } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
+            cfg.WithRegion(sharedCfg.Region)
+        }
+    }
+
+    if cfg.EnableEndpointDiscovery == nil {
+        if envCfg.EnableEndpointDiscovery != nil {
+            cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery)
+        } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil {
+            cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery)
+        }
+    }
+
+    // Regional Endpoint flag for STS endpoint resolving
+    mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{
+        userCfg.STSRegionalEndpoint,
+        envCfg.STSRegionalEndpoint,
+        sharedCfg.STSRegionalEndpoint,
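+        // Order reflects precedence: user config first, then environment,
+        // then shared config, falling back to the SDK's legacy behavior.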
+        endpoints.LegacySTSEndpoint,
+    })
+
+    // Regional Endpoint flag for S3 endpoint resolving
+    mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{
+        userCfg.S3UsEast1RegionalEndpoint,
+        envCfg.S3UsEast1RegionalEndpoint,
+        sharedCfg.S3UsEast1RegionalEndpoint,
+        endpoints.LegacyS3UsEast1Endpoint,
+    })
+
+    var ec2IMDSEndpoint string
+    for _, v := range []string{
+        sessOpts.EC2IMDSEndpoint,
+        envCfg.EC2IMDSEndpoint,
+        sharedCfg.EC2IMDSEndpoint,
+    } {
+        if len(v) != 0 {
+            ec2IMDSEndpoint = v
+            break
+        }
+    }
+
+    var endpointMode endpoints.EC2IMDSEndpointModeState
+    for _, v := range []endpoints.EC2IMDSEndpointModeState{
+        sessOpts.EC2IMDSEndpointMode,
+        envCfg.EC2IMDSEndpointMode,
+        sharedCfg.EC2IMDSEndpointMode,
+    } {
+        if v != endpoints.EC2IMDSEndpointModeStateUnset {
+            endpointMode = v
+            break
+        }
+    }
+
+    if len(ec2IMDSEndpoint) != 0 || endpointMode != endpoints.EC2IMDSEndpointModeStateUnset {
+        cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode)
+    }
+
+    cfg.EC2MetadataEnableFallback = userCfg.EC2MetadataEnableFallback
+    if cfg.EC2MetadataEnableFallback == nil && envCfg.EC2IMDSv1Disabled != nil {
+        cfg.EC2MetadataEnableFallback = aws.Bool(!*envCfg.EC2IMDSv1Disabled)
+    }
+    if cfg.EC2MetadataEnableFallback == nil && sharedCfg.EC2IMDSv1Disabled != nil {
+        cfg.EC2MetadataEnableFallback = aws.Bool(!*sharedCfg.EC2IMDSv1Disabled)
+    }
+
+    cfg.S3UseARNRegion = userCfg.S3UseARNRegion
+    if cfg.S3UseARNRegion == nil {
+        cfg.S3UseARNRegion = &envCfg.S3UseARNRegion
+    }
+    if cfg.S3UseARNRegion == nil {
+        cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion
+    }
+
+    for _, v := range []endpoints.DualStackEndpointState{userCfg.UseDualStackEndpoint, envCfg.UseDualStackEndpoint, sharedCfg.UseDualStackEndpoint} {
+        if v != endpoints.DualStackEndpointStateUnset {
+            cfg.UseDualStackEndpoint = v
+            break
+        }
+    }
+
+    for _, v := range []endpoints.FIPSEndpointState{userCfg.UseFIPSEndpoint, envCfg.UseFIPSEndpoint, sharedCfg.UseFIPSEndpoint} {
+        if v != endpoints.FIPSEndpointStateUnset {
+            cfg.UseFIPSEndpoint = v
+            break
+        }
+    }
+
+    // Configure credentials if not already set by the user when creating the Session.
+    // Credentials are resolved last such that all _resolved_ config values are propagated to credential providers.
+    // ticket: P83606045
+    if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
+        creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts)
+        if err != nil {
+            return err
+        }
+        cfg.Credentials = creds
+    }
+
+    return nil
+}
+
+func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) {
+    for _, v := range values {
+        if v != endpoints.UnsetSTSEndpoint {
+            cfg.STSRegionalEndpoint = v
+            break
+        }
+    }
+}
+
+func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) {
+    for _, v := range values {
+        if v != endpoints.UnsetS3UsEast1Endpoint {
+            cfg.S3UsEast1RegionalEndpoint = v
+            break
+        }
+    }
+}
+
+func initHandlers(s *Session) {
+    // Add the Validate parameter handler if it is not disabled.
+    s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
+    if !aws.BoolValue(s.Config.DisableParamValidation) {
+        s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
+    }
+}
+
+// Copy creates and returns a copy of the current Session, copying the config
+// and handlers. If any additional configs are provided they will be merged
+// on top of the Session's copied config.
+// +// // Create a copy of the current Session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + options: s.options, + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + + resolvedRegion := normalizeRegion(s.Config) + + region := aws.StringValue(s.Config.Region) + resolved, err := s.resolveEndpoint(service, region, resolvedRegion, s.Config) + if err != nil { + s.Handlers.Validate.PushBack(func(r *request.Request) { + if len(r.ClientInfo.Endpoint) != 0 { + // Error occurred while resolving endpoint, but the request + // being invoked has had an endpoint specified after the client + // was created. + return + } + r.Error = err + }) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + PartitionID: resolved.PartitionID, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + ResolvedRegion: resolvedRegion, + } +} + +const ec2MetadataServiceID = "ec2metadata" + +func (s *Session) resolveEndpoint(service, region, resolvedRegion string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { + + if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), + SigningRegion: region, + }, nil + } + + resolved, err := cfg.EndpointResolver.EndpointFor(service, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + opt.UseDualStackEndpoint = cfg.UseDualStackEndpoint + + opt.UseFIPSEndpoint = cfg.UseFIPSEndpoint + + // Support for STSRegionalEndpoint where the STSRegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint + + // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + + opt.ResolvedRegion = resolvedRegion + + opt.Logger = cfg.Logger + opt.LogDeprecated = cfg.LogLevel.Matches(aws.LogDebugWithDeprecated) + }, + ) + if err != nil { + return endpoints.ResolvedEndpoint{}, err + } + + return resolved, nil +} + +// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception +// that the EndpointResolver will not be used to resolve the endpoint. The only +// endpoint set must come from the aws.Config.Endpoint field. +func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) 
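+    // Copy applies any provided configs and re-initializes the handlers, so
+    // the original Session is left unmodified by this call.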
+
+    resolvedRegion := normalizeRegion(s.Config)
+
+    var resolved endpoints.ResolvedEndpoint
+    if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
+        resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
+        resolved.SigningRegion = aws.StringValue(s.Config.Region)
+    }
+
+    return client.Config{
+        Config:             s.Config,
+        Handlers:           s.Handlers,
+        Endpoint:           resolved.URL,
+        SigningRegion:      resolved.SigningRegion,
+        SigningNameDerived: resolved.SigningNameDerived,
+        SigningName:        resolved.SigningName,
+        ResolvedRegion:     resolvedRegion,
+    }
+}
+
+// logDeprecatedNewSessionError logs the session creation error and adds a
+// request handler that fails all requests made with the session.
+func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) {
+    // Session creation failed, need to report the error and prevent
+    // any requests from succeeding.
+    s.Config.MergeIn(cfgs...)
+    s.Config.Logger.Log("ERROR:", msg, "Error:", err)
+    s.Handlers.Validate.PushBack(func(r *request.Request) {
+        r.Error = err
+    })
+}
+
+// normalizeRegion resolves / normalizes the configured region (converts
+// pseudo FIPS regions), modifies the provided config to have the equivalent
+// options for resolution, and returns the resolved region name.
+func normalizeRegion(cfg *aws.Config) (resolved string) {
+    const fipsInfix = "-fips-"
+    const fipsPrefix = "fips-"
+    const fipsSuffix = "-fips"
+
+    region := aws.StringValue(cfg.Region)
+
+    if strings.Contains(region, fipsInfix) ||
+        strings.Contains(region, fipsPrefix) ||
+        strings.Contains(region, fipsSuffix) {
+        resolved = strings.Replace(strings.Replace(strings.Replace(
+            region, fipsInfix, "-", -1), fipsPrefix, "", -1), fipsSuffix, "", -1)
+        cfg.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
+    }
+
+    return resolved
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
new file mode 100644
index 000000000..f3ce8183d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -0,0 +1,856 @@
+package session
+
+import (
+    "fmt"
+    "strings"
+    "time"
+
+    "github.com/aws/aws-sdk-go/aws/awserr"
+    "github.com/aws/aws-sdk-go/aws/credentials"
+    "github.com/aws/aws-sdk-go/aws/endpoints"
+    "github.com/aws/aws-sdk-go/internal/ini"
+)
+
+const (
+    // Static Credentials group
+    accessKeyIDKey  = `aws_access_key_id`     // group required
+    secretAccessKey = `aws_secret_access_key` // group required
+    sessionTokenKey = `aws_session_token`     // optional
+
+    // Assume Role Credentials group
+    roleArnKey             = `role_arn`          // group required
+    sourceProfileKey       = `source_profile`    // group required (or credential_source)
+    credentialSourceKey    = `credential_source` // group required (or source_profile)
+    externalIDKey          = `external_id`       // optional
+    mfaSerialKey           = `mfa_serial`        // optional
+    roleSessionNameKey     = `role_session_name` // optional
+    roleDurationSecondsKey = "duration_seconds"  // optional
+
+    // Prefix to be used for SSO sections. These are supposed to only exist in
+    // the shared config file, not the credentials file.
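+    // For example, a profile setting "sso_session = my-session" (a
+    // hypothetical name) is looked up as the "[sso-session my-session]"
+    // section of the shared config file.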
+    ssoSectionPrefix = `sso-session `
+
+    // AWS Single Sign-On (AWS SSO) group
+    ssoSessionNameKey = "sso_session"
+
+    // AWS Single Sign-On (AWS SSO) group
+    ssoAccountIDKey = "sso_account_id"
+    ssoRegionKey    = "sso_region"
+    ssoRoleNameKey  = "sso_role_name"
+    ssoStartURL     = "sso_start_url"
+
+    // CSM options
+    csmEnabledKey  = `csm_enabled`
+    csmHostKey     = `csm_host`
+    csmPortKey     = `csm_port`
+    csmClientIDKey = `csm_client_id`
+
+    // Additional Config fields
+    regionKey = `region`
+
+    // custom CA Bundle filename
+    customCABundleKey = `ca_bundle`
+
+    // endpoint discovery group
+    enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
+
+    // External Credential Process
+    credentialProcessKey = `credential_process` // optional
+
+    // Web Identity Token File
+    webIdentityTokenFileKey = `web_identity_token_file` // optional
+
+    // Additional config fields for regional or legacy endpoints
+    stsRegionalEndpointSharedKey = `sts_regional_endpoints`
+
+    // Additional config fields for regional or legacy endpoints
+    s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint`
+
+    // DefaultSharedConfigProfile is the default profile to be used when
+    // loading configuration from the config files if another profile name
+    // is not provided.
+    DefaultSharedConfigProfile = `default`
+
+    // S3 ARN Region Usage
+    s3UseARNRegionKey = "s3_use_arn_region"
+
+    // EC2 IMDS Endpoint Mode
+    ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode"
+
+    // EC2 IMDS Endpoint
+    ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint"
+
+    // EC2 IMDSv1 disable fallback
+    ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled"
+
+    // Use DualStack Endpoint Resolution
+    useDualStackEndpoint = "use_dualstack_endpoint"
+
+    // Use FIPS Endpoint Resolution
+    useFIPSEndpointKey = "use_fips_endpoint"
+)
+
+// sharedConfig represents the configuration fields of the SDK config files.
+type sharedConfig struct {
+    Profile string
+
+    // Credentials values from the config file. Both aws_access_key_id and
+    // aws_secret_access_key must be provided together in the same file to be
+    // considered valid. The values will be ignored if not a complete group.
+    // aws_session_token is an optional field that can be provided if both of
+    // the other two fields are also provided.
+    //
+    //    aws_access_key_id
+    //    aws_secret_access_key
+    //    aws_session_token
+    Creds credentials.Value
+
+    CredentialSource     string
+    CredentialProcess    string
+    WebIdentityTokenFile string
+
+    // SSO session options
+    SSOSessionName string
+    SSOSession     *ssoSession
+
+    SSOAccountID string
+    SSORegion    string
+    SSORoleName  string
+    SSOStartURL  string
+
+    RoleARN            string
+    RoleSessionName    string
+    ExternalID         string
+    MFASerial          string
+    AssumeRoleDuration *time.Duration
+
+    SourceProfileName string
+    SourceProfile     *sharedConfig
+
+    // Region is the region the SDK should use for looking up AWS service
+    // endpoints and signing requests.
+    //
+    //    region
+    Region string
+
+    // CustomCABundle is the file path to a PEM file the SDK will read and
+    // use to configure the HTTP transport with additional CA certs that are
+    // not present in the platform's default CA store.
+    //
+    // This value will be ignored if the file does not exist.
+    //
+    //    ca_bundle
+    CustomCABundle string
+
+    // EnableEndpointDiscovery can be enabled in the shared config by setting
+    // endpoint_discovery_enabled to true
+    //
+    //    endpoint_discovery_enabled = true
+    EnableEndpointDiscovery *bool
+
+    // CSM Options
+    CSMEnabled  *bool
+    CSMHost     string
+    CSMPort     string
+    CSMClientID string
+
+    // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service
+    //
+    //    sts_regional_endpoints = regional
+    // This can take the value `LegacySTSEndpoint` or `RegionalSTSEndpoint`
+    STSRegionalEndpoint endpoints.STSRegionalEndpoint
+
+    // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service
+    //
+    //    s3_us_east_1_regional_endpoint = regional
+    // This can take the value `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint`
+    S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
+
+    // Specifies if the S3 service should allow ARNs to direct the region
+    // the client's requests are sent to.
+    //
+    //    s3_use_arn_region=true
+    S3UseARNRegion bool
+
+    // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
+    //
+    //    ec2_metadata_service_endpoint_mode=IPv6
+    EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState
+
+    // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode.
+    //
+    //    ec2_metadata_service_endpoint=http://fd00:ec2::254
+    EC2IMDSEndpoint string
+
+    // Specifies that IMDS clients should not fallback to IMDSv1 if token
+    // requests fail.
+    //
+    //    ec2_metadata_v1_disabled=true
+    EC2IMDSv1Disabled *bool
+
+    // Specifies that SDK clients must resolve a dual-stack endpoint for
+    // services.
+    //
+    //    use_dualstack_endpoint=true
+    UseDualStackEndpoint endpoints.DualStackEndpointState
+
+    // Specifies that SDK clients must resolve a FIPS endpoint for
+    // services.
+    //
+    //    use_fips_endpoint=true
+    UseFIPSEndpoint endpoints.FIPSEndpointState
+}
+
+type sharedConfigFile struct {
+    Filename string
+    IniData  ini.Sections
+}
+
+// ssoSession provides the shared configuration parameters of the sso-session
+// section.
+type ssoSession struct {
+    Name        string
+    SSORegion   string
+    SSOStartURL string
+}
+
+func (s *ssoSession) setFromIniSection(section ini.Section) {
+    updateString(&s.Name, section, ssoSessionNameKey)
+    updateString(&s.SSORegion, section, ssoRegionKey)
+    updateString(&s.SSOStartURL, section, ssoStartURL)
+}
+
+// loadSharedConfig retrieves the configuration from the list of files using
+// the profile provided. The order the files are listed will determine
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B, both defining credentials: if the
+// order of the files is A then B, B's credential values will be used instead
+// of A's.
+//
+// See sharedConfig.setFromIniFile for information on how the config files
+// will be loaded.
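+//
+// A minimal sketch of a typical call (the profile name and file paths here
+// are hypothetical):
+//
+//    cfg, err := loadSharedConfig("dev", []string{"/home/u/.aws/credentials", "/home/u/.aws/config"}, true)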
+func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) {
+    if len(profile) == 0 {
+        profile = DefaultSharedConfigProfile
+    }
+
+    files, err := loadSharedConfigIniFiles(filenames)
+    if err != nil {
+        return sharedConfig{}, err
+    }
+
+    cfg := sharedConfig{}
+    profiles := map[string]struct{}{}
+    if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil {
+        return sharedConfig{}, err
+    }
+
+    return cfg, nil
+}
+
+func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
+    files := make([]sharedConfigFile, 0, len(filenames))
+
+    for _, filename := range filenames {
+        sections, err := ini.OpenFile(filename)
+        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile {
+            // Skip files which can't be opened and read for whatever reason
+            continue
+        } else if err != nil {
+            return nil, SharedConfigLoadError{Filename: filename, Err: err}
+        }
+
+        files = append(files, sharedConfigFile{
+            Filename: filename, IniData: sections,
+        })
+    }
+
+    return files, nil
+}
+
+func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error {
+    cfg.Profile = profile
+
+    // Apply the profile from each file, tracking the files in which it was
+    // not found.
+    var skippedFiles int
+    var profileNotFoundErr error
+    for _, f := range files {
+        if err := cfg.setFromIniFile(profile, f, exOpts); err != nil {
+            if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+                // Ignore profiles not defined in individual files.
+                profileNotFoundErr = err
+                skippedFiles++
+                continue
+            }
+            return err
+        }
+    }
+    if skippedFiles == len(files) {
+        // If all files were skipped because the profile is not found, return
+        // the original profile not found error.
+        return profileNotFoundErr
+    }
+
+    if _, ok := profiles[profile]; ok {
+        // If this is the second instance of the profile the Assume Role
+        // options must be cleared because they are only valid for the
+        // first reference of a profile. The self-linked instance of the
+        // profile only has credential provider options.
+        cfg.clearAssumeRoleOptions()
+    } else {
+        // First time a profile has been seen. Assert that, if the credential
+        // type requires a role ARN, the ARN is also set.
+        if err := cfg.validateCredentialsConfig(profile); err != nil {
+            return err
+        }
+    }
+
+    profiles[profile] = struct{}{}
+
+    if err := cfg.validateCredentialType(); err != nil {
+        return err
+    }
+
+    // Link source profiles for assume roles
+    if len(cfg.SourceProfileName) != 0 {
+        // A profile linked via source_profile ignores credential provider
+        // options, the source profile must provide the credentials.
+        cfg.clearCredentialOptions()
+
+        srcCfg := &sharedConfig{}
+        err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts)
+        if err != nil {
+            // SourceProfile that doesn't exist is an error in configuration.
+            if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+                err = SharedConfigAssumeRoleError{
+                    RoleARN:       cfg.RoleARN,
+                    SourceProfile: cfg.SourceProfileName,
+                }
+            }
+            return err
+        }
+
+        if !srcCfg.hasCredentials() {
+            return SharedConfigAssumeRoleError{
+                RoleARN:       cfg.RoleARN,
+                SourceProfile: cfg.SourceProfileName,
+            }
+        }
+
+        cfg.SourceProfile = srcCfg
+    }
+
+    // If the profile contains an SSO session parameter, the session MUST exist
+    // as a section in the config file. Load the SSO session using the name
+    // provided. If the session section is not found or incomplete an error
+    // will be returned.
+    if cfg.hasSSOTokenProviderConfiguration() {
+        skippedFiles = 0
+        for _, f := range files {
+            section, ok := f.IniData.GetSection(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName))
+            if ok {
+                var ssoSession ssoSession
+                ssoSession.setFromIniSection(section)
+                ssoSession.Name = cfg.SSOSessionName
+                cfg.SSOSession = &ssoSession
+                break
+            }
+            skippedFiles++
+        }
+        if skippedFiles == len(files) {
+            // If all files were skipped because the sso session section is not found, return
+            // the sso section not found error.
+            return fmt.Errorf("failed to find SSO session section, %v", cfg.SSOSessionName)
+        }
+    }
+
+    return nil
+}
+
+// setFromIniFile loads the configuration from the file using the profile
+// provided. A sharedConfig pointer type value is used so that multiple config
+// file loadings can be chained.
+//
+// Only loads complete logically grouped values, and will not set fields in cfg
+// for incomplete grouped values in the config, such as credentials. For
+// example if a config file only includes aws_access_key_id but no
+// aws_secret_access_key the aws_access_key_id will be ignored.
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error {
+    section, ok := file.IniData.GetSection(profile)
+    if !ok {
+        // Fall back to the alternate profile name: "profile <name>"
+        section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
+        if !ok {
+            return SharedConfigProfileNotExistsError{Profile: profile, Err: nil}
+        }
+    }
+
+    if exOpts {
+        // Assume Role Parameters
+        updateString(&cfg.RoleARN, section, roleArnKey)
+        updateString(&cfg.ExternalID, section, externalIDKey)
+        updateString(&cfg.MFASerial, section, mfaSerialKey)
+        updateString(&cfg.RoleSessionName, section, roleSessionNameKey)
+        updateString(&cfg.SourceProfileName, section, sourceProfileKey)
+        updateString(&cfg.CredentialSource, section, credentialSourceKey)
+        updateString(&cfg.Region, section, regionKey)
+        updateString(&cfg.CustomCABundle, section, customCABundleKey)
+
+        // We're retaining a behavioral quirk with this field that existed before
+        // the removal of literal parsing for (aws-sdk-go-v2/#2276):
+        // - if the key is missing, the config field will not be set
+        // - if the key is set to a non-numeric, the config field will be set to 0
+        if section.Has(roleDurationSecondsKey) {
+            var d time.Duration
+            if v, ok := section.Int(roleDurationSecondsKey); ok {
+                d = time.Duration(v) * time.Second
+            }
+            cfg.AssumeRoleDuration = &d
+        }
+
+        if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 {
+            sre, err := endpoints.GetSTSRegionalEndpoint(v)
+            if err != nil {
+                return fmt.Errorf("failed to load %s from shared config, %s, %v",
+                    stsRegionalEndpointSharedKey, file.Filename, err)
+            }
+            cfg.STSRegionalEndpoint = sre
+        }
+
+        if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 {
+            sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v)
+            if err != nil {
+                return fmt.Errorf("failed to load %s from shared config, %s, %v",
+                    s3UsEast1RegionalSharedKey, file.Filename, err)
+            }
+            cfg.S3UsEast1RegionalEndpoint = sre
+        }
+
+        // AWS Single Sign-On (AWS SSO)
+        // SSO session options
+        updateString(&cfg.SSOSessionName, section, ssoSessionNameKey)
+
+        // AWS Single Sign-On (AWS SSO)
+        updateString(&cfg.SSOAccountID, section, ssoAccountIDKey)
+        updateString(&cfg.SSORegion, section, ssoRegionKey)
+        updateString(&cfg.SSORoleName, section, ssoRoleNameKey)
+        updateString(&cfg.SSOStartURL, section, ssoStartURL)
+
+        if err :=
updateEC2MetadataServiceEndpointMode(&cfg.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + ec2MetadataServiceEndpointModeKey, file.Filename, err) + } + updateString(&cfg.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) + updateBoolPtr(&cfg.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey) + + updateUseDualStackEndpoint(&cfg.UseDualStackEndpoint, section, useDualStackEndpoint) + + updateUseFIPSEndpoint(&cfg.UseFIPSEndpoint, section, useFIPSEndpointKey) + } + + updateString(&cfg.CredentialProcess, section, credentialProcessKey) + updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) + + // Shared Credentials + creds := credentials.Value{ + AccessKeyID: section.String(accessKeyIDKey), + SecretAccessKey: section.String(secretAccessKey), + SessionToken: section.String(sessionTokenKey), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + if creds.HasKeys() { + cfg.Creds = creds + } + + // Endpoint discovery + updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) + + // CSM options + updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) + updateString(&cfg.CSMHost, section, csmHostKey) + updateString(&cfg.CSMPort, section, csmPortKey) + updateString(&cfg.CSMClientID, section, csmClientIDKey) + + updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey) + + return nil +} + +func updateEC2MetadataServiceEndpointMode(endpointMode *endpoints.EC2IMDSEndpointModeState, section ini.Section, key string) error { + if !section.Has(key) { + return nil + } + value := section.String(key) + return endpointMode.SetFromString(value) +} + +func (cfg *sharedConfig) validateCredentialsConfig(profile string) error { + if err := cfg.validateCredentialsRequireARN(profile); err != nil { + return err + } + + return nil +} + +func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { + var credSource string + + switch { + case len(cfg.SourceProfileName) != 0: + credSource = sourceProfileKey + case len(cfg.CredentialSource) != 0: + credSource = credentialSourceKey + case len(cfg.WebIdentityTokenFile) != 0: + credSource = webIdentityTokenFileKey + } + + if len(credSource) != 0 && len(cfg.RoleARN) == 0 { + return CredentialRequiresARNError{ + Type: credSource, + Profile: profile, + } + } + + return nil +} + +func (cfg *sharedConfig) validateCredentialType() error { + // Only one or no credential type can be defined. 
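+    // For example, a profile that sets both source_profile and
+    // credential_process is ambiguous and is rejected below.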
+ if !oneOrNone( + len(cfg.SourceProfileName) != 0, + len(cfg.CredentialSource) != 0, + len(cfg.CredentialProcess) != 0, + len(cfg.WebIdentityTokenFile) != 0, + ) { + return ErrSharedConfigSourceCollision + } + + return nil +} + +func (cfg *sharedConfig) validateSSOConfiguration() error { + if cfg.hasSSOTokenProviderConfiguration() { + err := cfg.validateSSOTokenProviderConfiguration() + if err != nil { + return err + } + return nil + } + + if cfg.hasLegacySSOConfiguration() { + err := cfg.validateLegacySSOConfiguration() + if err != nil { + return err + } + } + return nil +} + +func (cfg *sharedConfig) hasCredentials() bool { + switch { + case len(cfg.SourceProfileName) != 0: + case len(cfg.CredentialSource) != 0: + case len(cfg.CredentialProcess) != 0: + case len(cfg.WebIdentityTokenFile) != 0: + case cfg.hasSSOConfiguration(): + case cfg.Creds.HasKeys(): + default: + return false + } + + return true +} + +func (cfg *sharedConfig) clearCredentialOptions() { + cfg.CredentialSource = "" + cfg.CredentialProcess = "" + cfg.WebIdentityTokenFile = "" + cfg.Creds = credentials.Value{} + cfg.SSOAccountID = "" + cfg.SSORegion = "" + cfg.SSORoleName = "" + cfg.SSOStartURL = "" +} + +func (cfg *sharedConfig) clearAssumeRoleOptions() { + cfg.RoleARN = "" + cfg.ExternalID = "" + cfg.MFASerial = "" + cfg.RoleSessionName = "" + cfg.SourceProfileName = "" +} + +func (cfg *sharedConfig) hasSSOConfiguration() bool { + return cfg.hasSSOTokenProviderConfiguration() || cfg.hasLegacySSOConfiguration() +} + +func (c *sharedConfig) hasSSOTokenProviderConfiguration() bool { + return len(c.SSOSessionName) > 0 +} + +func (c *sharedConfig) hasLegacySSOConfiguration() bool { + return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0 +} + +func (c *sharedConfig) validateSSOTokenProviderConfiguration() error { + var missing []string + + if len(c.SSOSessionName) == 0 { + missing = append(missing, ssoSessionNameKey) + } + + if c.SSOSession == nil { + missing = append(missing, ssoSectionPrefix) + } else { + if len(c.SSOSession.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOSession.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + + if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix) + } + + if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURL, c.Profile, ssoStartURL, ssoSectionPrefix) + } + + return nil +} + +func (c *sharedConfig) validateLegacySSOConfiguration() error { + var missing []string + + if len(c.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + + if len(c.SSOAccountID) == 0 { + missing = append(missing, ssoAccountIDKey) + } + + if len(c.SSORoleName) == 0 { + missing = append(missing, ssoRoleNameKey) + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + return nil +} + +func oneOrNone(bs ...bool) bool { + var count int + + for _, b := range bs { + if b { + count++ + if count > 1 { + return false + } + } 
+    }
+
+    return true
+}
+
+// updateString will only update the dst with the value in the section, if
+// the key is present in the section.
+func updateString(dst *string, section ini.Section, key string) {
+    if !section.Has(key) {
+        return
+    }
+    *dst = section.String(key)
+}
+
+// updateBool will only update the dst with the value in the section, if
+// the key is present in the section.
+func updateBool(dst *bool, section ini.Section, key string) {
+    if !section.Has(key) {
+        return
+    }
+
+    // retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false
+    v, _ := section.Bool(key)
+    *dst = v
+}
+
+// updateBoolPtr will only update the dst with the value in the section, if
+// the key is present in the section.
+func updateBoolPtr(dst **bool, section ini.Section, key string) {
+    if !section.Has(key) {
+        return
+    }
+
+    // retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false
+    v, _ := section.Bool(key)
+    *dst = new(bool)
+    **dst = v
+}
+
+// SharedConfigLoadError is an error for when the shared config file fails to
+// load.
+type SharedConfigLoadError struct {
+    Filename string
+    Err      error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigLoadError) Code() string {
+    return "SharedConfigLoadError"
+}
+
+// Message is the description of the error
+func (e SharedConfigLoadError) Message() string {
+    return fmt.Sprintf("failed to load config file, %s", e.Filename)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigLoadError) OrigErr() error {
+    return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigLoadError) Error() string {
+    return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigProfileNotExistsError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistsError struct {
+    Profile string
+    Err     error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigProfileNotExistsError) Code() string {
+    return "SharedConfigProfileNotExistsError"
+}
+
+// Message is the description of the error
+func (e SharedConfigProfileNotExistsError) Message() string {
+    return fmt.Sprintf("failed to get profile, %s", e.Profile)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistsError) OrigErr() error {
+    return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigProfileNotExistsError) Error() string {
+    return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+    RoleARN       string
+    SourceProfile string
+}
+
+// Code is the short id of the error.
+func (e SharedConfigAssumeRoleError) Code() string {
+    return "SharedConfigAssumeRoleError"
+}
+
+// Message is the description of the error
+func (e SharedConfigAssumeRoleError) Message() string {
+    return fmt.Sprintf(
+        "failed to load assume role for %s, source profile %s has no shared credentials",
+        e.RoleARN, e.SourceProfile,
+    )
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) OrigErr() error {
+    return nil
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigAssumeRoleError) Error() string {
+    return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+// CredentialRequiresARNError provides the error for shared config credentials
+// that are incorrectly configured in the shared config or credentials file.
+type CredentialRequiresARNError struct {
+    // type of credentials that were configured.
+    Type string
+
+    // Profile name the credentials were in.
+    Profile string
+}
+
+// Code is the short id of the error.
+func (e CredentialRequiresARNError) Code() string {
+    return "CredentialRequiresARNError"
+}
+
+// Message is the description of the error
+func (e CredentialRequiresARNError) Message() string {
+    return fmt.Sprintf(
+        "credential type %s requires role_arn, profile %s",
+        e.Type, e.Profile,
+    )
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e CredentialRequiresARNError) OrigErr() error {
+    return nil
+}
+
+// Error satisfies the error interface.
+func (e CredentialRequiresARNError) Error() string {
+    return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+// updateUseDualStackEndpoint will only update the dst with the value in the
+// section, if a valid key and corresponding DualStackEndpointState is found.
+func updateUseDualStackEndpoint(dst *endpoints.DualStackEndpointState, section ini.Section, key string) {
+    if !section.Has(key) {
+        return
+    }
+
+    // retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false
+    if v, _ := section.Bool(key); v {
+        *dst = endpoints.DualStackEndpointStateEnabled
+    } else {
+        *dst = endpoints.DualStackEndpointStateDisabled
+    }
+}
+
+// updateUseFIPSEndpoint will only update the dst with the value in the
+// section, if a valid key and corresponding FIPSEndpointState is found.
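+//
+// For example, in the shared config file:
+//
+//    use_fips_endpoint = true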
+func updateUseFIPSEndpoint(dst *endpoints.FIPSEndpointState, section ini.Section, key string) {
+    if !section.Has(key) {
+        return
+    }
+
+    // retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false
+    if v, _ := section.Bool(key); v {
+        *dst = endpoints.FIPSEndpointStateEnabled
+    } else {
+        *dst = endpoints.FIPSEndpointStateDisabled
+    }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
new file mode 100644
index 000000000..993753831
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
@@ -0,0 +1,81 @@
+package v4
+
+import (
+    "github.com/aws/aws-sdk-go/internal/strings"
+)
+
+// rules houses a set of rules needed for validation of a
+// string value
+type rules []rule
+
+// rule interface allows for more flexible rules and simply
+// checks whether or not a value adheres to that rule
+type rule interface {
+    IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and see if any rule
+// applies to the value; nested rules are supported
+func (r rules) IsValid(value string) bool {
+    for _, rule := range r {
+        if rule.IsValid(value) {
+            return true
+        }
+    }
+    return false
+}
+
+// mapRule generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule checks whether the value exists in the map
+func (m mapRule) IsValid(value string) bool {
+    _, ok := m[value]
+    return ok
+}
+
+// allowList is a generic rule for allow listing
+type allowList struct {
+    rule
+}
+
+// IsValid for allow list checks if the value is within the allow list
+func (w allowList) IsValid(value string) bool {
+    return w.rule.IsValid(value)
+}
+
+// excludeList is a generic rule for exclude listing
+type excludeList struct {
+    rule
+}
+
+// IsValid for exclude list checks if the value is within the exclude list
+func (b excludeList) IsValid(value string) bool {
+    return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and returns if a match has
+// been found
+func (p patterns) IsValid(value string) bool {
+    for _, pattern := range p {
+        if strings.HasPrefixFold(value, pattern) {
+            return true
+        }
+    }
+    return false
+}
+
+// inclusiveRules rules allow for rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return true if all rules are true
+func (r inclusiveRules) IsValid(value string) bool {
+    for _, rule := range r {
+        if !rule.IsValid(value) {
+            return false
+        }
+    }
+    return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
new file mode 100644
index 000000000..6aa2ed241
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
@@ -0,0 +1,7 @@
+package v4
+
+// WithUnsignedPayload will enable and set the UnsignedPayload field to
+// true on the signer.
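+//
+// A typical use, assuming creds is a *credentials.Credentials value:
+//
+//    signer := v4.NewSigner(creds, v4.WithUnsignedPayload)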
+func WithUnsignedPayload(v4 *Signer) { + v4.UnsignedPayload = true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go new file mode 100644 index 000000000..cf672b6ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go @@ -0,0 +1,14 @@ +//go:build !go1.7 +// +build !go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return aws.BackgroundContext() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go new file mode 100644 index 000000000..21fe74e6f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go @@ -0,0 +1,14 @@ +//go:build go1.7 +// +build go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return r.Context() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go new file mode 100644 index 000000000..02cbd97e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go @@ -0,0 +1,63 @@ +package v4 + +import ( + "encoding/hex" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +type credentialValueProvider interface { + Get() (credentials.Value, error) +} + +// StreamSigner implements signing of event stream encoded payloads +type StreamSigner struct { + region string + service string + + credentials credentialValueProvider + + prevSig []byte +} + +// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages +func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner { + return &StreamSigner{ + region: region, + service: service, + credentials: credentials, + prevSig: seedSignature, + } +} + +// GetSignature takes an event stream encoded headers and payload and returns a signature +func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) { + credValue, err := s.credentials.Get() + if err != nil { + return nil, err + } + + sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date) + + keyPath := buildSigningScope(s.region, s.service, date) + + stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date) + + signature := hmacSHA256(sigKey, []byte(stringToSign)) + s.prevSig = signature + + return signature, nil +} + +func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string { + return strings.Join([]string{ + "AWS4-HMAC-SHA256-PAYLOAD", + formatTime(date), + scope, + hex.EncodeToString(prevSig), + hex.EncodeToString(hashSHA256(headers)), + hex.EncodeToString(hashSHA256(payload)), + }, "\n") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go new file mode 100644 index 000000000..7711ec737 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go @@ -0,0 +1,25 @@ +//go:build go1.5 +// +build go1.5 + +package v4 + +import ( + "net/url" + "strings" +) + +func getURIPath(u *url.URL) string { + var uri string + + if len(u.Opaque) > 0 { + uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], 
"/") + } else { + uri = u.EscapedPath() + } + + if len(uri) == 0 { + uri = "/" + } + + return uri +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go new file mode 100644 index 000000000..b542df931 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -0,0 +1,857 @@ +// Package v4 implements signing for AWS V4 signer +// +// Provides request signing for request that need to be signed with +// AWS V4 Signatures. +// +// # Standalone Signer +// +// Generally using the signer outside of the SDK should not require any additional +// logic when using Go v1.5 or higher. The signer does this by taking advantage +// of the URL.EscapedPath method. If your request URI requires additional escaping +// you may need to use the URL.Opaque to define what the raw URI should be sent +// to the service as. +// +// The signer will first check the URL.Opaque field, and use its value if set. +// The signer does require the URL.Opaque field to be set in the form of: +// +// "///" +// +// // e.g. +// "//example.com/some/path" +// +// The leading "//" and hostname are required or the URL.Opaque escaping will +// not work correctly. +// +// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() +// method and using the returned value. If you're using Go v1.4 you must set +// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with +// Go v1.5 the signer will fallback to URL.Path. +// +// AWS v4 signature validation requires that the canonical string's URI path +// element must be the URI escaped form of the HTTP request's path. +// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +// +// The Go HTTP client will perform escaping automatically on the request. Some +// of these escaping may cause signature validation errors because the HTTP +// request differs from the URI path or query that the signature was generated. +// https://golang.org/pkg/net/url/#URL.EscapedPath +// +// Because of this, it is recommended that when using the signer outside of the +// SDK that explicitly escaping the request prior to being signed is preferable, +// and will help prevent signature validation errors. This can be done by setting +// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then +// call URL.EscapedPath() if Opaque is not set. +// +// If signing a request intended for HTTP2 server, and you're using Go 1.6.2 +// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the +// request URL. https://github.com/golang/go/issues/16847 points to a bug in +// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP +// message. URL.Opaque generally will force Go to make requests with absolute URL. +// URL.RawPath does not do this, but RawPath must be a valid escaping of Path +// or url.EscapedPath will ignore the RawPath escaping. +// +// Test `TestStandaloneSign` provides a complete example of using the signer +// outside of the SDK and pre-escaping the URI path. 
+package v4
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkio"
+	"github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+	authorizationHeader     = "Authorization"
+	authHeaderSignatureElem = "Signature="
+	signatureQueryKey       = "X-Amz-Signature"
+
+	authHeaderPrefix = "AWS4-HMAC-SHA256"
+	timeFormat       = "20060102T150405Z"
+	shortTimeFormat  = "20060102"
+	awsV4Request     = "aws4_request"
+
+	// emptyStringSHA256 is a SHA256 of an empty string
+	emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+)
+
+var ignoredHeaders = rules{
+	excludeList{
+		mapRule{
+			authorizationHeader: struct{}{},
+			"User-Agent":        struct{}{},
+			"X-Amzn-Trace-Id":   struct{}{},
+		},
+	},
+}
+
+// requiredSignedHeaders is an allow list used when building the canonical headers.
+var requiredSignedHeaders = rules{
+	allowList{
+		mapRule{
+			"Cache-Control": struct{}{},
+			"Content-Disposition": struct{}{},
+			"Content-Encoding": struct{}{},
+			"Content-Language": struct{}{},
+			"Content-Md5": struct{}{},
+			"Content-Type": struct{}{},
+			"Expires": struct{}{},
+			"If-Match": struct{}{},
+			"If-Modified-Since": struct{}{},
+			"If-None-Match": struct{}{},
+			"If-Unmodified-Since": struct{}{},
+			"Range": struct{}{},
+			"X-Amz-Acl": struct{}{},
+			"X-Amz-Copy-Source": struct{}{},
+			"X-Amz-Copy-Source-If-Match": struct{}{},
+			"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
+			"X-Amz-Copy-Source-If-None-Match": struct{}{},
+			"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+			"X-Amz-Copy-Source-Range": struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+			"X-Amz-Expected-Bucket-Owner": struct{}{},
+			"X-Amz-Grant-Full-control": struct{}{},
+			"X-Amz-Grant-Read": struct{}{},
+			"X-Amz-Grant-Read-Acp": struct{}{},
+			"X-Amz-Grant-Write": struct{}{},
+			"X-Amz-Grant-Write-Acp": struct{}{},
+			"X-Amz-Metadata-Directive": struct{}{},
+			"X-Amz-Mfa": struct{}{},
+			"X-Amz-Request-Payer": struct{}{},
+			"X-Amz-Server-Side-Encryption": struct{}{},
+			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
+			"X-Amz-Server-Side-Encryption-Context": struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+			"X-Amz-Storage-Class": struct{}{},
+			"X-Amz-Tagging": struct{}{},
+			"X-Amz-Website-Redirect-Location": struct{}{},
+			"X-Amz-Content-Sha256": struct{}{},
+		},
+	},
+	patterns{"X-Amz-Meta-"},
+	patterns{"X-Amz-Object-Lock-"},
+}
+
+// allowedQueryHoisting is an allow list used when building the query string;
+// entries may be exact header rules or prefix patterns.
+var allowedQueryHoisting = inclusiveRules{
+	excludeList{requiredSignedHeaders},
+	patterns{"X-Amz-"},
+}
+
+// Signer applies AWS v4 signing to a given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.
+type Signer struct {
+	// The authentication credentials the request will be signed against.
+	// This value must be set to sign requests.
+	Credentials *credentials.Credentials
+
+	// Sets the log level the signer should use when reporting information to
+	// the logger. If the logger is nil, nothing will be logged. See
+	// aws.LogLevelType for more information on available logging levels.
+	//
+	// By default nothing will be logged.
+	Debug aws.LogLevelType
+
+	// The logger logging information will be written to. If the logger
+	// is nil, nothing will be logged.
+	Logger aws.Logger
+
+	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
+	// request header to the request's query string. This is most commonly used
+	// with pre-signed requests preventing headers from being added to the
+	// request's query string.
+	DisableHeaderHoisting bool
+
+	// Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string's path. For services that do not need additional
+	// escaping, use this to disable the signer escaping the path.
+	//
+	// S3 is an example of a service that does not need additional escaping.
+	//
+	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+	DisableURIPathEscaping bool
+
+	// Disables the automatic setting of the HTTP request's Body field with the
+	// io.ReadSeeker passed in to the signer. This is useful if you're using a
+	// custom wrapper around the body for the io.ReadSeeker and want to preserve
+	// the Body value on the Request.Body.
+	//
+	// This does run the risk of signing a request with a body that will not be
+	// sent in the request. You need to ensure that the underlying data of the
+	// Body values are the same.
+	DisableRequestBodyOverwrite bool
+
+	// currentTimeFn returns the time value which represents the current time.
+	// This value should only be used for testing. If it is nil the default
+	// time.Now will be used.
+	currentTimeFn func() time.Time
+
+	// UnsignedPayload will prevent signing of the payload. This will only
+	// work for services that have support for this.
+	UnsignedPayload bool
+}
+
+// NewSigner returns a Signer pointer configured with the credentials and optional
+// option values provided. If no options are provided the Signer will use its
+// default configuration.
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
+	v4 := &Signer{
+		Credentials: credentials,
+	}
+
+	for _, option := range options {
+		option(v4)
+	}
+
+	return v4
+}
+
+type signingCtx struct {
+	ServiceName      string
+	Region           string
+	Request          *http.Request
+	Body             io.ReadSeeker
+	Query            url.Values
+	Time             time.Time
+	ExpireTime       time.Duration
+	SignedHeaderVals http.Header
+
+	DisableURIPathEscaping bool
+
+	credValues      credentials.Value
+	isPresign       bool
+	unsignedPayload bool
+
+	bodyDigest       string
+	signedHeaders    string
+	canonicalHeaders string
+	canonicalString  string
+	credentialString string
+	stringToSign     string
+	signature        string
+	authorization    string
+}
+
+// Sign signs AWS v4 requests with the provided body, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
+//
+// Sign will set the request's Body to be the `body` parameter passed in. If
+// the body is not already an io.ReadCloser, it will be wrapped within one. If
+// a `nil` body parameter is passed to Sign, the request's Body field will
+// also be set to nil. It's important to note that this functionality will
+// not change the request's ContentLength.
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way the header values on the request
+// will not be lost.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+	return v4.signWithBody(r, body, service, region, 0, false, signTime)
+}
+
+// Presign signs AWS v4 requests with the provided body, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. For presigned requests these headers
+// and their values must be included on the HTTP request when it is made. This
+// is helpful to know what header values need to be shared with the party the
+// presigned request will be distributed to.
+//
+// Presign differs from Sign in that it will sign the request using query string
+// instead of header values. This allows you to share the Presigned Request's
+// URL with third parties, or distribute it throughout your system with minimal
+// dependencies.
+//
+// Presign also takes an exp value which is the duration the
+// signed request will be valid after the signing time. This allows you to
+// set when the request will expire.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+//
+// Presigning an S3 request will not compute the body's SHA256 hash by default.
+// This is because the general use case for S3 presigned URLs is to share
+// PUT/GET capabilities. If you would like to include the body's SHA256 in the
+// presigned request's signature you can set the "X-Amz-Content-Sha256"
+// HTTP header and that will be included in the request's signature.
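+//
+// A minimal presign sketch (the bucket, key, region, and expiry below are
+// illustrative placeholders):
+//
+//	req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/key", nil)
+//	headers, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now())
+//	// On success req.URL carries the X-Amz-* query parameters and can be
+//	// shared; the returned headers must be sent with the request when made.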
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+	return v4.signWithBody(r, body, service, region, exp, true, signTime)
+}
+
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
+	currentTimeFn := v4.currentTimeFn
+	if currentTimeFn == nil {
+		currentTimeFn = time.Now
+	}
+
+	ctx := &signingCtx{
+		Request:                r,
+		Body:                   body,
+		Query:                  r.URL.Query(),
+		Time:                   signTime,
+		ExpireTime:             exp,
+		isPresign:              isPresign,
+		ServiceName:            service,
+		Region:                 region,
+		DisableURIPathEscaping: v4.DisableURIPathEscaping,
+		unsignedPayload:        v4.UnsignedPayload,
+	}
+
+	for key := range ctx.Query {
+		sort.Strings(ctx.Query[key])
+	}
+
+	if ctx.isRequestSigned() {
+		ctx.Time = currentTimeFn()
+		ctx.handlePresignRemoval()
+	}
+
+	var err error
+	ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r))
+	if err != nil {
+		return http.Header{}, err
+	}
+
+	ctx.sanitizeHostForHeader()
+	ctx.assignAmzQueryValues()
+	if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
+		return nil, err
+	}
+
+	// If the request is not presigned, the body should be attached to it. This
+	// prevents the confusion of wanting to send a signed request without
+	// the body the request was signed for attached.
+	if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
+		var reader io.ReadCloser
+		if body != nil {
+			var ok bool
+			if reader, ok = body.(io.ReadCloser); !ok {
+				reader = ioutil.NopCloser(body)
+			}
+		}
+		r.Body = reader
+	}
+
+	if v4.Debug.Matches(aws.LogDebugWithSigning) {
+		v4.logSigningInfo(ctx)
+	}
+
+	return ctx.SignedHeaderVals, nil
+}
+
+func (ctx *signingCtx) sanitizeHostForHeader() {
+	request.SanitizeHostForHeader(ctx.Request)
+}
+
+func (ctx *signingCtx) handlePresignRemoval() {
+	if !ctx.isPresign {
+		return
+	}
+
+	// The credentials have expired for this request. The current signing
+	// is invalid, and the request needs to be signed again or it will fail.
+	ctx.removePresign()
+
+	// Update the request's query string to ensure the values stay in
+	// sync in the case retrieving the new credentials fails.
+	ctx.Request.URL.RawQuery = ctx.Query.Encode()
+}
+
+func (ctx *signingCtx) assignAmzQueryValues() {
+	if ctx.isPresign {
+		ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+		if ctx.credValues.SessionToken != "" {
+			ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+		} else {
+			ctx.Query.Del("X-Amz-Security-Token")
+		}
+
+		return
+	}
+
+	if ctx.credValues.SessionToken != "" {
+		ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+	}
+}
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client requests with the V4 signature.
+var SignRequestHandler = request.NamedHandler{
+	Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
+}
+
+// SignSDKRequest signs an AWS request with the V4 signature. This
+// request handler should only be used with the SDK's built in service client's
+// API operation requests.
+//
+// This function should not be used on its own, but in conjunction with
+// an AWS service client's API operation call. To sign a standalone request
+// not created by a service client's API operation method use the "Sign" or
+// "Presign" functions of the "Signer" type.
+//
+// If the credentials of the request's config are set to
+// credentials.AnonymousCredentials the request will not be signed.
+func SignSDKRequest(req *request.Request) {
+	SignSDKRequestWithCurrentTime(req, time.Now)
+}
+
+// BuildNamedHandler will build a generic handler for signing.
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
+	return request.NamedHandler{
+		Name: name,
+		Fn: func(req *request.Request) {
+			SignSDKRequestWithCurrentTime(req, time.Now, opts...)
+		},
+	}
+}
+
+// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
+// function passed in. Behaves the same as SignSDKRequest with the exception
+// that the request is signed with the value returned by the current time function.
+func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
+	// If the AnonymousCredentials object is used the request does not need
+	// to be signed, so skip signing it.
+	if req.Config.Credentials == credentials.AnonymousCredentials {
+		return
+	}
+
+	region := req.ClientInfo.SigningRegion
+	if region == "" {
+		region = aws.StringValue(req.Config.Region)
+	}
+
+	name := req.ClientInfo.SigningName
+	if name == "" {
+		name = req.ClientInfo.ServiceName
+	}
+
+	v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
+		v4.Debug = req.Config.LogLevel.Value()
+		v4.Logger = req.Config.Logger
+		v4.DisableHeaderHoisting = req.NotHoist
+		v4.currentTimeFn = curTimeFn
+		if name == "s3" {
+			// S3 service should not have any escaping applied
+			v4.DisableURIPathEscaping = true
+		}
+		// Prevents setting the HTTPRequest's Body, since the Body could be
+		// wrapped in a custom io.Closer that we do not want to be stomped
+		// on by the signer.
+ v4.DisableRequestBodyOverwrite = true + }) + + for _, opt := range opts { + opt(v4) + } + + curTime := curTimeFn() + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), + name, region, req.ExpireTime, req.ExpireTime > 0, curTime, + ) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTime +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) error { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + if err := ctx.buildBodyDigest(); err != nil { + return err + } + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + ctx.signedHeaders, + authHeaderSignatureElem + ctx.signature, + } + ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", ")) + } + + return nil +} + +// GetSignedRequestSignature attempts to extract the signature of the request. +// Returning an error if the request is unsigned, or unable to extract the +// signature. 
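+//
+// A hedged usage sketch (signedReq is assumed to be a request that was
+// previously signed or presigned):
+//
+//	sig, err := GetSignedRequestSignature(signedReq)
+//	if err == nil {
+//		fmt.Printf("signature: %x\n", sig)
+//	}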
+func GetSignedRequestSignature(r *http.Request) ([]byte, error) { + + if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { + ps := strings.Split(auth, ", ") + for _, p := range ps { + if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { + sig := p[len(authHeaderSignatureElem):] + if len(sig) == 0 { + return nil, fmt.Errorf("invalid request signature authorization header") + } + return hex.DecodeString(sig) + } + } + } + + if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { + return hex.DecodeString(sig) + } + + return nil, fmt.Errorf("request not signed") +} + +func (ctx *signingCtx) buildTime() { + if ctx.isPresign { + duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time)) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time)) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time) + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + if !r.IsValid(k) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) 
+ continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerItems := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + if ctx.Request.Host != "" { + headerItems[i] = "host:" + ctx.Request.Host + } else { + headerItems[i] = "host:" + ctx.Request.URL.Host + } + } else { + headerValues := make([]string, len(ctx.SignedHeaderVals[k])) + for i, v := range ctx.SignedHeaderVals[k] { + headerValues[i] = strings.TrimSpace(v) + } + headerItems[i] = k + ":" + + strings.Join(headerValues, ",") + } + } + stripExcessSpaces(headerItems) + ctx.canonicalHeaders = strings.Join(headerItems, "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + + uri := getURIPath(ctx.Request.URL) + + if !ctx.DisableURIPathEscaping { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + formatTime(ctx.Time), + ctx.credentialString, + hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time) + signature := hmacSHA256(creds, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() error { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + includeSHA256Header := ctx.unsignedPayload || + ctx.ServiceName == "s3" || + ctx.ServiceName == "s3-object-lambda" || + ctx.ServiceName == "glacier" || + ctx.ServiceName == "s3-outposts" + + s3Presign := ctx.isPresign && + (ctx.ServiceName == "s3" || + ctx.ServiceName == "s3-object-lambda") + + if ctx.unsignedPayload || s3Presign { + hash = "UNSIGNED-PAYLOAD" + includeSHA256Header = !s3Presign + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + if !aws.IsReaderSeekable(ctx.Body) { + return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) + } + hashBytes, err := makeSha256Reader(ctx.Body) + if err != nil { + return err + } + hash = hex.EncodeToString(hashBytes) + } + + if includeSHA256Header { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash + + return nil +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. 
+func (ctx *signingCtx) removePresign() {
+	ctx.Query.Del("X-Amz-Algorithm")
+	ctx.Query.Del("X-Amz-Signature")
+	ctx.Query.Del("X-Amz-Security-Token")
+	ctx.Query.Del("X-Amz-Date")
+	ctx.Query.Del("X-Amz-Expires")
+	ctx.Query.Del("X-Amz-Credential")
+	ctx.Query.Del("X-Amz-SignedHeaders")
+}
+
+func hmacSHA256(key []byte, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+func hashSHA256(data []byte) []byte {
+	hash := sha256.New()
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) {
+	hash := sha256.New()
+	start, err := reader.Seek(0, sdkio.SeekCurrent)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// ensure an error is returned if unable to seek back to the start of the payload.
+		_, err = reader.Seek(start, sdkio.SeekStart)
+	}()
+
+	// Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
+	// smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
+	size, err := aws.SeekerLen(reader)
+	if err != nil {
+		io.Copy(hash, reader)
+	} else {
+		io.CopyN(hash, reader, size)
+	}
+
+	return hash.Sum(nil), nil
+}
+
+const doubleSpace = "  "
+
+// stripExcessSpaces will rewrite the passed in slice's string values to not
+// contain multiple side-by-side spaces.
+func stripExcessSpaces(vals []string) {
+	var j, k, l, m, spaces int
+	for i, str := range vals {
+		// Trim trailing spaces
+		for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+		}
+
+		// Trim leading spaces
+		for k = 0; k < j && str[k] == ' '; k++ {
+		}
+		str = str[k : j+1]
+
+		// Strip multiple spaces.
+		j = strings.Index(str, doubleSpace)
+		if j < 0 {
+			vals[i] = str
+			continue
+		}
+
+		buf := []byte(str)
+		for k, m, l = j, j, len(buf); k < l; k++ {
+			if buf[k] == ' ' {
+				if spaces == 0 {
+					// First space.
+					buf[m] = buf[k]
+					m++
+				}
+				spaces++
+			} else {
+				// End of multiple spaces.
+				spaces = 0
+				buf[m] = buf[k]
+				m++
+			}
+		}
+
+		vals[i] = string(buf[:m])
+	}
+}
+
+func buildSigningScope(region, service string, dt time.Time) string {
+	return strings.Join([]string{
+		formatShortTime(dt),
+		region,
+		service,
+		awsV4Request,
+	}, "/")
+}
+
+// deriveSigningKey derives the SigV4 signing key by chaining HMAC-SHA256
+// over the date, region, service, and the terminal "aws4_request" string.
+func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte {
+	kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt)))
+	kRegion := hmacSHA256(kDate, []byte(region))
+	kService := hmacSHA256(kRegion, []byte(service))
+	signingKey := hmacSHA256(kService, []byte(awsV4Request))
+	return signingKey
+}
+
+func formatShortTime(dt time.Time) string {
+	return dt.UTC().Format(shortTimeFormat)
+}
+
+func formatTime(dt time.Time) string {
+	return dt.UTC().Format(timeFormat)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100644
index 000000000..98751ee84
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -0,0 +1,264 @@
+package aws
+
+import (
+	"io"
+	"strings"
+	"sync"
+
+	"github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. Allows the
+// SDK to accept an io.Reader that is not also an io.Seeker for unsigned
+// streaming payload API operations.
+//
+// A ReadSeekCloser wrapping a nonseekable io.Reader used in an API
+// operation's input will prevent that operation from being retried in the case
+// of network errors, and cause operation requests to fail if the operation
+// requires payload signing.
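+//
+// A hedged sketch of wrapping a non-seekable stream (resp is assumed to be an
+// *http.Response obtained elsewhere):
+//
+//	body := aws.ReadSeekCloser(resp.Body) // resp.Body is only an io.Reader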
+//
+// Note: If using S3 PutObject to stream an object upload, the SDK's S3
+// upload manager (s3manager.Uploader) provides support for streaming with
+// the ability to retry network errors.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+	return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+	r io.Reader
+}
+
+// IsReaderSeekable returns if the underlying reader type can be seeked. An
+// io.Reader might not actually be seekable if it is the ReaderSeekerCloser
+// type.
+func IsReaderSeekable(r io.Reader) bool {
+	switch v := r.(type) {
+	case ReaderSeekerCloser:
+		return v.IsSeeker()
+	case *ReaderSeekerCloser:
+		return v.IsSeeker()
+	case io.ReadSeeker:
+		return true
+	default:
+		return false
+	}
+}
+
+// Read reads from the reader up to the size of p. The number of bytes read,
+// and error if one occurred, will be returned.
+//
+// If the underlying reader is not an io.Reader, zero bytes are read and a nil
+// error is returned.
+//
+// Performs the same functionality as io.Reader's Read.
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+	switch t := r.r.(type) {
+	case io.Reader:
+		return t.Read(p)
+	}
+	return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker, nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+	switch t := r.r.(type) {
+	case io.Seeker:
+		return t.Seek(offset, whence)
+	}
+	return int64(0), nil
+}
+
+// IsSeeker returns if the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool {
+	_, ok := r.r.(io.Seeker)
+	return ok
+}
+
+// HasLen returns the length of the underlying reader if the value implements
+// the Len() int method.
+func (r ReaderSeekerCloser) HasLen() (int, bool) {
+	type lenner interface {
+		Len() int
+	}
+
+	if lr, ok := r.r.(lenner); ok {
+		return lr.Len(), true
+	}
+
+	return 0, false
+}
+
+// GetLen returns the length of the bytes remaining in the underlying reader.
+// Checks first for Len(), then io.Seeker to determine the size of the
+// underlying reader.
+//
+// Will return -1 if the length cannot be determined.
+func (r ReaderSeekerCloser) GetLen() (int64, error) {
+	if l, ok := r.HasLen(); ok {
+		return int64(l), nil
+	}
+
+	if s, ok := r.r.(io.Seeker); ok {
+		return seekerLen(s)
+	}
+
+	return -1, nil
+}
+
+// SeekerLen attempts to get the number of bytes remaining at the seeker's
+// current position. Returns the number of bytes remaining or an error.
+func SeekerLen(s io.Seeker) (int64, error) {
+	// Determine if the seeker is actually seekable. ReaderSeekerCloser
+	// hides the fact that an io.Reader might not actually be seekable.
+	switch v := s.(type) {
+	case ReaderSeekerCloser:
+		return v.GetLen()
+	case *ReaderSeekerCloser:
+		return v.GetLen()
+	}
+
+	return seekerLen(s)
+}
+
+func seekerLen(s io.Seeker) (int64, error) {
+	curOffset, err := s.Seek(0, sdkio.SeekCurrent)
+	if err != nil {
+		return 0, err
+	}
+
+	endOffset, err := s.Seek(0, sdkio.SeekEnd)
+	if err != nil {
+		return 0, err
+	}
+
+	_, err = s.Seek(curOffset, sdkio.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+
+	return endOffset - curOffset, nil
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer, nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+	switch t := r.r.(type) {
+	case io.Closer:
+		return t.Close()
+	}
+	return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt
+// interface. It can be used with the s3manager.Downloader to download content
+// to a buffer in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+	buf []byte
+	m   sync.Mutex
+
+	// GrowthCoeff defines the growth rate of the internal buffer. By
+	// default, the growth rate is 1, where expanding the internal
+	// buffer will allocate only enough capacity to fit the new expected
+	// length.
+	GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+	return &WriteAtBuffer{buf: buf}
+}
+
+// WriteAt writes a slice of bytes to the buffer starting at the position
+// provided. The number of bytes written, or an error, will be returned. It
+// can overwrite previously written slices if the writes overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+	pLen := len(p)
+	expLen := pos + int64(pLen)
+	b.m.Lock()
+	defer b.m.Unlock()
+	if int64(len(b.buf)) < expLen {
+		if int64(cap(b.buf)) < expLen {
+			if b.GrowthCoeff < 1 {
+				b.GrowthCoeff = 1
+			}
+			newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+			copy(newBuf, b.buf)
+			b.buf = newBuf
+		}
+		b.buf = b.buf[:expLen]
+	}
+	copy(b.buf[pos:], p)
+	return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+	b.m.Lock()
+	defer b.m.Unlock()
+	return b.buf
+}
+
+// MultiCloser is a utility to close multiple io.Closers within a single
+// statement.
+type MultiCloser []io.Closer
+
+// Close closes all of the io.Closers making up the MultiCloser. Any
+// errors that occur while closing will be returned in the order they
+// occur.
+func (m MultiCloser) Close() error {
+	var errs errors
+	for _, c := range m {
+		err := c.Close()
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+	if len(errs) != 0 {
+		return errs
+	}
+
+	return nil
+}
+
+type errors []error
+
+func (es errors) Error() string {
+	var parts []string
+	for _, e := range es {
+		parts = append(parts, e.Error())
+	}
+
+	return strings.Join(parts, "\n")
+}
+
+// CopySeekableBody copies the seekable body to an io.Writer.
+func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
+	curPos, err := src.Seek(0, sdkio.SeekCurrent)
+	if err != nil {
+		return 0, err
+	}
+
+	// copy errors may be assumed to be from the body.
+	n, err := io.Copy(dst, src)
+	if err != nil {
+		return n, err
+	}
+
+	// seek back to the first position after reading to reset
+	// the body for transmission.
+ _, err = src.Seek(curPos, sdkio.SeekStart) + if err != nil { + return n, err + } + + return n, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go new file mode 100644 index 000000000..fed561bd5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go @@ -0,0 +1,13 @@ +//go:build go1.8 +// +build go1.8 + +package aws + +import "net/url" + +// URLHostname will extract the Hostname without port from the URL value. +// +// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. +func URLHostname(url *url.URL) string { + return url.Hostname() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go new file mode 100644 index 000000000..95282db03 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go @@ -0,0 +1,30 @@ +//go:build !go1.8 +// +build !go1.8 + +package aws + +import ( + "net/url" + "strings" +) + +// URLHostname will extract the Hostname without port from the URL value. +// +// Copy of Go 1.8's net/url#URL.Hostname functionality. +func URLHostname(url *url.URL) string { + return stripPort(url.Host) + +} + +// stripPort is copy of Go 1.8 url#URL.Hostname functionality. +// https://golang.org/src/net/url/url.go +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go new file mode 100644 index 000000000..d15e3c84c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "1.55.5" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go new file mode 100644 index 000000000..365345353 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go @@ -0,0 +1,41 @@ +//go:build !go1.7 +// +build !go1.7 + +package context + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case BackgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +// BackgroundCtx is the common base context. 
+var BackgroundCtx = new(emptyCtx) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go new file mode 100644 index 000000000..e83a99886 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go @@ -0,0 +1,120 @@ +package ini + +// ASTKind represents different states in the parse table +// and the type of AST that is being constructed +type ASTKind int + +// ASTKind* is used in the parse table to transition between +// the different states +const ( + ASTKindNone = ASTKind(iota) + ASTKindStart + ASTKindExpr + ASTKindEqualExpr + ASTKindStatement + ASTKindSkipStatement + ASTKindExprStatement + ASTKindSectionStatement + ASTKindNestedSectionStatement + ASTKindCompletedNestedSectionStatement + ASTKindCommentStatement + ASTKindCompletedSectionStatement +) + +func (k ASTKind) String() string { + switch k { + case ASTKindNone: + return "none" + case ASTKindStart: + return "start" + case ASTKindExpr: + return "expr" + case ASTKindStatement: + return "stmt" + case ASTKindSectionStatement: + return "section_stmt" + case ASTKindExprStatement: + return "expr_stmt" + case ASTKindCommentStatement: + return "comment" + case ASTKindNestedSectionStatement: + return "nested_section_stmt" + case ASTKindCompletedSectionStatement: + return "completed_stmt" + case ASTKindSkipStatement: + return "skip" + default: + return "" + } +} + +// AST interface allows us to determine what kind of node we +// are on and casting may not need to be necessary. +// +// The root is always the first node in Children +type AST struct { + Kind ASTKind + Root Token + RootToken bool + Children []AST +} + +func newAST(kind ASTKind, root AST, children ...AST) AST { + return AST{ + Kind: kind, + Children: append([]AST{root}, children...), + } +} + +func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { + return AST{ + Kind: kind, + Root: root, + RootToken: true, + Children: children, + } +} + +// AppendChild will append to the list of children an AST has. +func (a *AST) AppendChild(child AST) { + a.Children = append(a.Children, child) +} + +// GetRoot will return the root AST which can be the first entry +// in the children list or a token. +func (a *AST) GetRoot() AST { + if a.RootToken { + return *a + } + + if len(a.Children) == 0 { + return AST{} + } + + return a.Children[0] +} + +// GetChildren will return the current AST's list of children +func (a *AST) GetChildren() []AST { + if len(a.Children) == 0 { + return []AST{} + } + + if a.RootToken { + return a.Children + } + + return a.Children[1:] +} + +// SetChildren will set and override all children of the AST. +func (a *AST) SetChildren(children []AST) { + if a.RootToken { + a.Children = children + } else { + a.Children = append(a.Children[:1], children...) + } +} + +// Start is used to indicate the starting state of the parse table. 
+var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go new file mode 100644 index 000000000..0895d53cb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go @@ -0,0 +1,11 @@ +package ini + +var commaRunes = []rune(",") + +func isComma(b rune) bool { + return b == ',' +} + +func newCommaToken() Token { + return newToken(TokenComma, commaRunes, NoneType) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go new file mode 100644 index 000000000..0b76999ba --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go @@ -0,0 +1,35 @@ +package ini + +// isComment will return whether or not the next byte(s) is a +// comment. +func isComment(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case ';': + return true + case '#': + return true + } + + return false +} + +// newCommentToken will create a comment token and +// return how many bytes were read. +func newCommentToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if b[i] == '\n' { + break + } + + if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { + break + } + } + + return newToken(TokenComment, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go new file mode 100644 index 000000000..1e55bbd07 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go @@ -0,0 +1,42 @@ +// Package ini is an LL(1) parser for configuration files. +// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +// +// Below is the BNF that describes this parser +// Grammar: +// stmt -> section | stmt' +// stmt' -> epsilon | expr +// expr -> value (stmt)* | equal_expr (stmt)* +// equal_expr -> value ( ':' | '=' ) equal_expr' +// equal_expr' -> number | string | quoted_string +// quoted_string -> " quoted_string' +// quoted_string' -> string quoted_string_end +// quoted_string_end -> " +// +// section -> [ section' +// section' -> section_value section_close +// section_value -> number | string_subset | boolean | quoted_string_subset +// quoted_string_subset -> " quoted_string_subset' +// quoted_string_subset' -> string_subset quoted_string_end +// quoted_string_subset -> " +// section_close -> ] +// +// value -> number | string_subset | boolean +// string -> ? UTF-8 Code-Points except '\n' (U+000A) and '\r\n' (U+000D U+000A) ? +// string_subset -> ? Code-points excepted by grammar except ':' (U+003A), '=' (U+003D), '[' (U+005B), and ']' (U+005D) ? 
+// +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value +package ini diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go new file mode 100644 index 000000000..04345a54c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go @@ -0,0 +1,4 @@ +package ini + +// emptyToken is used to satisfy the Token interface +var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go new file mode 100644 index 000000000..91ba2a59d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go @@ -0,0 +1,24 @@ +package ini + +// newExpression will return an expression AST. +// Expr represents an expression +// +// grammar: +// expr -> string | number +func newExpression(tok Token) AST { + return newASTWithRootToken(ASTKindExpr, tok) +} + +func newEqualExpr(left AST, tok Token) AST { + return newASTWithRootToken(ASTKindEqualExpr, tok, left) +} + +// EqualExprKey will return a LHS value in the equal expr +func EqualExprKey(ast AST) string { + children := ast.GetChildren() + if len(children) == 0 || ast.Kind != ASTKindEqualExpr { + return "" + } + + return string(children[0].Root.Raw()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go new file mode 100644 index 000000000..6e545b63b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go @@ -0,0 +1,18 @@ +//go:build gofuzz +// +build gofuzz + +package ini + +import ( + "bytes" +) + +func Fuzz(data []byte) int { + b := bytes.NewReader(data) + + if _, err := Parse(b); err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go new file mode 100644 index 000000000..3b0ca7afe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go @@ -0,0 +1,51 @@ +package ini + +import ( + "io" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// OpenFile takes a path to a given file, and will open and parse +// that file. +func OpenFile(path string) (Sections, error) { + f, err := os.Open(path) + if err != nil { + return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) + } + defer f.Close() + + return Parse(f) +} + +// Parse will parse the given file using the shared config +// visitor. +func Parse(f io.Reader) (Sections, error) { + tree, err := ParseAST(f) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} + +// ParseBytes will parse the given bytes and return the parsed sections. 
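+//
+// A hedged usage sketch (the section and key names are illustrative):
+//
+//	sections, err := ParseBytes([]byte("[default]\nregion = us-west-2\n"))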
+func ParseBytes(b []byte) (Sections, error) {
+	tree, err := ParseASTBytes(b)
+	if err != nil {
+		return Sections{}, err
+	}
+
+	v := NewDefaultVisitor()
+	if err = Walk(tree, v); err != nil {
+		return Sections{}, err
+	}
+
+	return v.Sections, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
new file mode 100644
index 000000000..582c024ad
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
@@ -0,0 +1,165 @@
+package ini
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+	// ErrCodeUnableToReadFile is used when a file fails to be
+	// opened or read.
+	ErrCodeUnableToReadFile = "FailedRead"
+)
+
+// TokenType represents the various different token types
+type TokenType int
+
+func (t TokenType) String() string {
+	switch t {
+	case TokenNone:
+		return "none"
+	case TokenLit:
+		return "literal"
+	case TokenSep:
+		return "sep"
+	case TokenOp:
+		return "op"
+	case TokenWS:
+		return "ws"
+	case TokenNL:
+		return "newline"
+	case TokenComment:
+		return "comment"
+	case TokenComma:
+		return "comma"
+	default:
+		return ""
+	}
+}
+
+// TokenType enums
+const (
+	TokenNone = TokenType(iota)
+	TokenLit
+	TokenSep
+	TokenComma
+	TokenOp
+	TokenWS
+	TokenNL
+	TokenComment
+)
+
+type iniLexer struct{}
+
+// Tokenize will return a list of tokens during lexical analysis of the
+// io.Reader.
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err)
+	}
+
+	return l.tokenize(b)
+}
+
+func (l *iniLexer) tokenize(b []byte) ([]Token, error) {
+	runes := bytes.Runes(b)
+	var err error
+	n := 0
+	tokenAmount := countTokens(runes)
+	tokens := make([]Token, tokenAmount)
+	count := 0
+
+	for len(runes) > 0 && count < tokenAmount {
+		switch {
+		case isWhitespace(runes[0]):
+			tokens[count], n, err = newWSToken(runes)
+		case isComma(runes[0]):
+			tokens[count], n = newCommaToken(), 1
+		case isComment(runes):
+			tokens[count], n, err = newCommentToken(runes)
+		case isNewline(runes):
+			tokens[count], n, err = newNewlineToken(runes)
+		case isSep(runes):
+			tokens[count], n, err = newSepToken(runes)
+		case isOp(runes):
+			tokens[count], n, err = newOpToken(runes)
+		default:
+			tokens[count], n, err = newLitToken(runes)
+		}
+
+		if err != nil {
+			return nil, err
+		}
+
+		count++
+
+		runes = runes[n:]
+	}
+
+	return tokens[:count], nil
+}
+
+func countTokens(runes []rune) int {
+	count, n := 0, 0
+	var err error
+
+	for len(runes) > 0 {
+		switch {
+		case isWhitespace(runes[0]):
+			_, n, err = newWSToken(runes)
+		case isComma(runes[0]):
+			_, n = newCommaToken(), 1
+		case isComment(runes):
+			_, n, err = newCommentToken(runes)
+		case isNewline(runes):
+			_, n, err = newNewlineToken(runes)
+		case isSep(runes):
+			_, n, err = newSepToken(runes)
+		case isOp(runes):
+			_, n, err = newOpToken(runes)
+		default:
+			_, n, err = newLitToken(runes)
+		}
+
+		if err != nil {
+			return 0
+		}
+
+		count++
+		runes = runes[n:]
+	}
+
+	return count + 1
+}
+
+// Token carries metadata about a given value.
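+//
+// For example, lexing "key = value" is expected to yield, in order: a literal
+// token ("key"), a whitespace token, an op token ("="), another whitespace
+// token, and a literal token ("value"). This is an illustrative sketch, not a
+// contract on the exact token stream.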
+type Token struct { + t TokenType + ValueType ValueType + base int + raw []rune +} + +var emptyValue = Value{} + +func newToken(t TokenType, raw []rune, v ValueType) Token { + return Token{ + t: t, + raw: raw, + ValueType: v, + } +} + +// Raw return the raw runes that were consumed +func (tok Token) Raw() []rune { + return tok.raw +} + +// Type returns the token type +func (tok Token) Type() TokenType { + return tok.t +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go new file mode 100644 index 000000000..0ba319491 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -0,0 +1,350 @@ +package ini + +import ( + "fmt" + "io" +) + +// ParseState represents the current state of the parser. +type ParseState uint + +// State enums for the parse table +const ( + InvalidState ParseState = iota + // stmt -> value stmt' + StatementState + // stmt' -> MarkComplete | op stmt + StatementPrimeState + // value -> number | string | boolean | quoted_string + ValueState + // section -> [ section' + OpenScopeState + // section' -> value section_close + SectionState + // section_close -> ] + CloseScopeState + // SkipState will skip (NL WS)+ + SkipState + // SkipTokenState will skip any token and push the previous + // state onto the stack. + SkipTokenState + // comment -> # comment' | ; comment' + // comment' -> MarkComplete | value + CommentState + // MarkComplete state will complete statements and move that + // to the completed AST list + MarkCompleteState + // TerminalState signifies that the tokens have been fully parsed + TerminalState +) + +// parseTable is a state machine to dictate the grammar above. +var parseTable = map[ASTKind]map[TokenType]ParseState{ + ASTKindStart: { + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, + ASTKindCommentStatement: { + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExpr: { + TokenOp: StatementPrimeState, + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenWS: ValueState, + TokenNL: SkipState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindEqualExpr: { + TokenLit: ValueState, + TokenSep: ValueState, + TokenOp: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + TokenNone: SkipState, + }, + ASTKindStatement: { + TokenLit: SectionState, + TokenSep: CloseScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExprStatement: { + TokenLit: ValueState, + TokenSep: ValueState, + TokenOp: ValueState, + TokenWS: ValueState, + TokenNL: MarkCompleteState, + TokenComment: CommentState, + TokenNone: TerminalState, + TokenComma: SkipState, + }, + ASTKindSectionStatement: { + TokenLit: SectionState, + TokenOp: SectionState, + TokenSep: CloseScopeState, + TokenWS: SectionState, + TokenNL: SkipTokenState, + }, + ASTKindCompletedSectionStatement: { + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindSkipStatement: { + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + 
TokenNone: TerminalState, + }, +} + +// ParseAST will parse input from an io.Reader using +// an LL(1) parser. +func ParseAST(r io.Reader) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.Tokenize(r) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +// ParseASTBytes will parse input from a byte slice using +// an LL(1) parser. +func ParseASTBytes(b []byte) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.tokenize(b) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +func parse(tokens []Token) ([]AST, error) { + start := Start + stack := newParseStack(3, len(tokens)) + + stack.Push(start) + s := newSkipper() + +loop: + for stack.Len() > 0 { + k := stack.Pop() + + var tok Token + if len(tokens) == 0 { + // this occurs when all the tokens have been processed + // but reduction of what's left on the stack needs to + // occur. + tok = emptyToken + } else { + tok = tokens[0] + } + + step := parseTable[k.Kind][tok.Type()] + if s.ShouldSkip(tok) { + // being in a skip state with no tokens will break out of + // the parse loop since there is nothing left to process. + if len(tokens) == 0 { + break loop + } + // if should skip is true, we skip the tokens until should skip is set to false. + step = SkipTokenState + } + + switch step { + case TerminalState: + // Finished parsing. Push what should be the last + // statement to the stack. If there is anything left + // on the stack, an error in parsing has occurred. + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + break loop + case SkipTokenState: + // When skipping a token, the previous state was popped off the stack. + // To maintain the correct state, the previous state will be pushed + // onto the stack. + stack.Push(k) + case StatementState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + expr := newExpression(tok) + stack.Push(expr) + case StatementPrimeState: + if tok.Type() != TokenOp { + stack.MarkComplete(k) + continue + } + + if k.Kind != ASTKindExpr { + return nil, NewParseError( + fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), + ) + } + + k = trimSpaces(k) + expr := newEqualExpr(k, tok) + stack.Push(expr) + case ValueState: + // ValueState requires the previous state to either be an equal expression + // or an expression statement. + switch k.Kind { + case ASTKindEqualExpr: + // assigning a value to some key + k.AppendChild(newExpression(tok)) + stack.Push(newExprStatement(k)) + case ASTKindExpr: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stack.Push(k) + case ASTKindExprStatement: + root := k.GetRoot() + children := root.GetChildren() + if len(children) == 0 { + return nil, NewParseError( + fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), + ) + } + + rhs := children[len(children)-1] + + if rhs.Root.ValueType != QuotedStringType { + rhs.Root.ValueType = StringType + rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) 
+ + } + + children[len(children)-1] = rhs + root.SetChildren(children) + + stack.Push(k) + } + case OpenScopeState: + if !runeCompare(tok.Raw(), openBrace) { + return nil, NewParseError("expected '['") + } + // If OpenScopeState is not at the start, we must mark the previous ast as complete + // + // for example: if previous ast was a skip statement; + // we should mark it as complete before we create a new statement + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + stmt := newStatement() + stack.Push(stmt) + case CloseScopeState: + if !runeCompare(tok.Raw(), closeBrace) { + return nil, NewParseError("expected ']'") + } + + k = trimSpaces(k) + stack.Push(newCompletedSectionStatement(k)) + case SectionState: + var stmt AST + + switch k.Kind { + case ASTKindStatement: + // If there are multiple literals inside of a scope declaration, + // then the current token's raw value will be appended to the Name. + // + // This handles cases like [ profile default ] + // + // k will represent a SectionStatement with the children representing + // the label of the section + stmt = newSectionStatement(tok) + case ASTKindSectionStatement: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stmt = k + default: + return nil, NewParseError( + fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), + ) + } + + stack.Push(stmt) + case MarkCompleteState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + if stack.Len() == 0 { + stack.Push(start) + } + case SkipState: + stack.Push(newSkipStatement(k)) + s.Skip() + case CommentState: + if k.Kind == ASTKindStart { + stack.Push(k) + } else { + stack.MarkComplete(k) + } + + stmt := newCommentStatement(tok) + stack.Push(stmt) + default: + return nil, NewParseError( + fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", + k, tok.Type())) + } + + if len(tokens) > 0 { + tokens = tokens[1:] + } + } + + // this occurs when a statement has not been completed + if stack.top > 1 { + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) + } + + // returns a sublist which excludes the start symbol + return stack.List(), nil +} + +// trimSpaces will trim spaces on the left and right hand side of +// the literal. 
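+// For example, a root raw value of "  foo  " is expected to be reduced to
+// "foo" (an illustrative sketch).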
+func trimSpaces(k AST) AST {
+	// trim left hand side of spaces
+	for i := 0; i < len(k.Root.raw); i++ {
+		if !isWhitespace(k.Root.raw[i]) {
+			break
+		}
+
+		k.Root.raw = k.Root.raw[1:]
+		i--
+	}
+
+	// trim right hand side of spaces
+	for i := len(k.Root.raw) - 1; i >= 0; i-- {
+		if !isWhitespace(k.Root.raw[i]) {
+			break
+		}
+
+		k.Root.raw = k.Root.raw[:len(k.Root.raw)-1]
+	}
+
+	return k
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
new file mode 100644
index 000000000..b1b686086
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
@@ -0,0 +1,337 @@
+package ini
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+var (
+	runesTrue  = []rune("true")
+	runesFalse = []rune("false")
+)
+
+var literalValues = [][]rune{
+	runesTrue,
+	runesFalse,
+}
+
+func isBoolValue(b []rune) bool {
+	for _, lv := range literalValues {
+		if isCaselessLitValue(lv, b) {
+			return true
+		}
+	}
+	return false
+}
+
+func isLitValue(want, have []rune) bool {
+	if len(have) < len(want) {
+		return false
+	}
+
+	for i := 0; i < len(want); i++ {
+		if want[i] != have[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+// isCaselessLitValue is a caseless value comparison, assumes want is already lower-cased for efficiency.
+func isCaselessLitValue(want, have []rune) bool {
+	if len(have) < len(want) {
+		return false
+	}
+
+	for i := 0; i < len(want); i++ {
+		if want[i] != unicode.ToLower(have[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// isNumberValue will return whether or not the leading characters in
+// a byte slice are a number. A number is delimited by whitespace or
+// the newline token.
+//
+// A number is defined to be in a binary, octal, decimal (int | float), hex format,
+// or in scientific notation.
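+//
+// For example, "123", "-1.5", "0x1A", and "1e-3" are intended to be accepted
+// as numbers, while "1.2.3" is not (illustrative examples based on the
+// implementation below, not an exhaustive specification).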
+func isNumberValue(b []rune) bool { + negativeIndex := 0 + helper := numberHelper{} + needDigit := false + + for i := 0; i < len(b); i++ { + negativeIndex++ + + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return false + } + helper.Determine(b[i]) + needDigit = true + continue + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return false + } + negativeIndex = 0 + needDigit = true + continue + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + needDigit = true + if i == 0 { + return false + } + + fallthrough + case '.': + if err := helper.Determine(b[i]); err != nil { + return false + } + needDigit = true + continue + } + + if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { + return !needDigit + } + + if !helper.CorrectByte(b[i]) { + return false + } + needDigit = false + } + + return !needDigit +} + +func isValid(b []rune) (bool, int, error) { + if len(b) == 0 { + // TODO: should probably return an error + return false, 0, nil + } + + return isValidRune(b[0]), 1, nil +} + +func isValidRune(r rune) bool { + return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' +} + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case DecimalType: + return "FLOAT" + case IntegerType: + return "INT" + case StringType: + return "STRING" + case BoolType: + return "BOOL" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + DecimalType // deprecated + IntegerType // deprecated + StringType + QuotedStringType + BoolType // deprecated +) + +// Value is a union container +type Value struct { + Type ValueType + raw []rune + + integer int64 // deprecated + decimal float64 // deprecated + boolean bool // deprecated + str string +} + +func newValue(t ValueType, base int, raw []rune) (Value, error) { + v := Value{ + Type: t, + raw: raw, + } + var err error + + switch t { + case DecimalType: + v.decimal, err = strconv.ParseFloat(string(raw), 64) + case IntegerType: + if base != 10 { + raw = raw[2:] + } + + v.integer, err = strconv.ParseInt(string(raw), base, 64) + case StringType: + v.str = string(raw) + case QuotedStringType: + v.str = string(raw[1 : len(raw)-1]) + case BoolType: + v.boolean = isCaselessLitValue(runesTrue, v.raw) + } + + // issue 2253 + // + // if the value trying to be parsed is too large, then we will use + // the 'StringType' and raw value instead. + if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { + v.Type = StringType + v.str = string(raw) + err = nil + } + + return v, err +} + +// Append will append values and change the type to a string +// type. +func (v *Value) Append(tok Token) { + r := tok.Raw() + if v.Type != QuotedStringType { + v.Type = StringType + r = tok.raw[1 : len(tok.raw)-1] + } + if tok.Type() != TokenLit { + v.raw = append(v.raw, tok.Raw()...) + } else { + v.raw = append(v.raw, r...) 
+ } +} + +func (v Value) String() string { + switch v.Type { + case DecimalType: + return fmt.Sprintf("decimal: %f", v.decimal) + case IntegerType: + return fmt.Sprintf("integer: %d", v.integer) + case StringType: + return fmt.Sprintf("string: %s", string(v.raw)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.raw)) + case BoolType: + return fmt.Sprintf("bool: %t", v.boolean) + default: + return "union not set" + } +} + +func newLitToken(b []rune) (Token, int, error) { + n := 0 + var err error + + token := Token{} + if b[0] == '"' { + n, err = getStringValue(b) + if err != nil { + return token, n, err + } + + token = newToken(TokenLit, b[:n], QuotedStringType) + } else { + n, err = getValue(b) + token = newToken(TokenLit, b[:n], StringType) + } + + return token, n, err +} + +// IntValue returns an integer value +func (v Value) IntValue() (int64, bool) { + i, err := strconv.ParseInt(string(v.raw), 0, 64) + if err != nil { + return 0, false + } + return i, true +} + +// FloatValue returns a float value +func (v Value) FloatValue() (float64, bool) { + f, err := strconv.ParseFloat(string(v.raw), 64) + if err != nil { + return 0, false + } + return f, true +} + +// BoolValue returns a bool value +func (v Value) BoolValue() (bool, bool) { + // we don't use ParseBool as it recognizes more than what we've + // historically supported + if isCaselessLitValue(runesTrue, v.raw) { + return true, true + } else if isCaselessLitValue(runesFalse, v.raw) { + return false, true + } + return false, false +} + +func isTrimmable(r rune) bool { + switch r { + case '\n', ' ': + return true + } + return false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + switch v.Type { + case StringType: + return strings.TrimFunc(string(v.raw), isTrimmable) + case QuotedStringType: + // preserve all characters in the quotes + return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) + default: + return strings.TrimFunc(string(v.raw), isTrimmable) + } +} + +func contains(runes []rune, c rune) bool { + for i := 0; i < len(runes); i++ { + if runes[i] == c { + return true + } + } + + return false +} + +func runeCompare(v1 []rune, v2 []rune) bool { + if len(v1) != len(v2) { + return false + } + + for i := 0; i < len(v1); i++ { + if v1[i] != v2[i] { + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go new file mode 100644 index 000000000..e52ac399f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go @@ -0,0 +1,30 @@ +package ini + +func isNewline(b []rune) bool { + if len(b) == 0 { + return false + } + + if b[0] == '\n' { + return true + } + + if len(b) < 2 { + return false + } + + return b[0] == '\r' && b[1] == '\n' +} + +func newNewlineToken(b []rune) (Token, int, error) { + i := 1 + if b[0] == '\r' && isNewline(b[1:]) { + i++ + } + + if !isNewline([]rune(b[:i])) { + return emptyToken, 0, NewParseError("invalid new line token") + } + + return newToken(TokenNL, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go new file mode 100644 index 000000000..a45c0bc56 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go @@ -0,0 +1,152 @@ +package ini + +import ( + "bytes" + "fmt" + "strconv" +) + +const ( + none = numberFormat(iota) + binary + octal + decimal + hex + 
exponent +) + +type numberFormat int + +// numberHelper is used to dictate what format a number is in +// and what to do for negative values. Since -1e-4 is a valid +// number, we cannot just simply check for duplicate negatives. +type numberHelper struct { + numberFormat numberFormat + + negative bool + negativeExponent bool +} + +func (b numberHelper) Exists() bool { + return b.numberFormat != none +} + +func (b numberHelper) IsNegative() bool { + return b.negative || b.negativeExponent +} + +func (b *numberHelper) Determine(c rune) error { + if b.Exists() { + return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) + } + + switch c { + case 'b': + b.numberFormat = binary + case 'o': + b.numberFormat = octal + case 'x': + b.numberFormat = hex + case 'e', 'E': + b.numberFormat = exponent + case '-': + if b.numberFormat != exponent { + b.negative = true + } else { + b.negativeExponent = true + } + case '.': + b.numberFormat = decimal + default: + return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) + } + + return nil +} + +func (b numberHelper) CorrectByte(c rune) bool { + switch { + case b.numberFormat == binary: + if !isBinaryByte(c) { + return false + } + case b.numberFormat == octal: + if !isOctalByte(c) { + return false + } + case b.numberFormat == hex: + if !isHexByte(c) { + return false + } + case b.numberFormat == decimal: + if !isDigit(c) { + return false + } + case b.numberFormat == exponent: + if !isDigit(c) { + return false + } + case b.negativeExponent: + if !isDigit(c) { + return false + } + case b.negative: + if !isDigit(c) { + return false + } + default: + if !isDigit(c) { + return false + } + } + + return true +} + +func (b numberHelper) Base() int { + switch b.numberFormat { + case binary: + return 2 + case octal: + return 8 + case hex: + return 16 + default: + return 10 + } +} + +func (b numberHelper) String() string { + buf := bytes.Buffer{} + i := 0 + + switch b.numberFormat { + case binary: + i++ + buf.WriteString(strconv.Itoa(i) + ": binary format\n") + case octal: + i++ + buf.WriteString(strconv.Itoa(i) + ": octal format\n") + case hex: + i++ + buf.WriteString(strconv.Itoa(i) + ": hex format\n") + case exponent: + i++ + buf.WriteString(strconv.Itoa(i) + ": exponent format\n") + default: + i++ + buf.WriteString(strconv.Itoa(i) + ": integer format\n") + } + + if b.negative { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative format\n") + } + + if b.negativeExponent { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go new file mode 100644 index 000000000..8a84c7cbe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go @@ -0,0 +1,39 @@ +package ini + +import ( + "fmt" +) + +var ( + equalOp = []rune("=") + equalColonOp = []rune(":") +) + +func isOp(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '=': + return true + case ':': + return true + default: + return false + } +} + +func newOpToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '=': + tok = newToken(TokenOp, equalOp, NoneType) + case ':': + tok = newToken(TokenOp, equalColonOp, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go 
b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
new file mode 100644
index 000000000..457287019
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
@@ -0,0 +1,43 @@
+package ini
+
+import "fmt"
+
+const (
+	// ErrCodeParseError is returned when a parsing error
+	// has occurred.
+	ErrCodeParseError = "INIParseError"
+)
+
+// ParseError is an error which is returned during any part of
+// the parsing process.
+type ParseError struct {
+	msg string
+}
+
+// NewParseError will return a new ParseError where message
+// is the description of the error.
+func NewParseError(message string) *ParseError {
+	return &ParseError{
+		msg: message,
+	}
+}
+
+// Code will return the ErrCodeParseError
+func (err *ParseError) Code() string {
+	return ErrCodeParseError
+}
+
+// Message returns the error's message
+func (err *ParseError) Message() string {
+	return err.msg
+}
+
+// OrigError returns nothing since there will never be any
+// original error.
+func (err *ParseError) OrigError() error {
+	return nil
+}
+
+func (err *ParseError) Error() string {
+	return fmt.Sprintf("%s: %s", err.Code(), err.Message())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
new file mode 100644
index 000000000..7f01cf7c7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
@@ -0,0 +1,60 @@
+package ini
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// ParseStack contains both the stack of ASTs that are being built up
+// (the container) and the list of ASTs that have been successfully
+// parsed and marked complete.
+type ParseStack struct {
+	top       int
+	container []AST
+	list      []AST
+	index     int
+}
+
+func newParseStack(sizeContainer, sizeList int) ParseStack {
+	return ParseStack{
+		container: make([]AST, sizeContainer),
+		list:      make([]AST, sizeList),
+	}
+}
+
+// Pop will return and truncate the last container element.
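+//
+// For example (illustrative): after Push(a) and then Push(b), the first Pop
+// returns b and the next Pop returns a.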
+func (s *ParseStack) Pop() AST { + s.top-- + return s.container[s.top] +} + +// Push will add the new AST to the container +func (s *ParseStack) Push(ast AST) { + s.container[s.top] = ast + s.top++ +} + +// MarkComplete will append the AST to the list of completed statements +func (s *ParseStack) MarkComplete(ast AST) { + s.list[s.index] = ast + s.index++ +} + +// List will return the completed statements +func (s ParseStack) List() []AST { + return s.list[:s.index] +} + +// Len will return the length of the container +func (s *ParseStack) Len() int { + return s.top +} + +func (s ParseStack) String() string { + buf := bytes.Buffer{} + for i, node := range s.list { + buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go new file mode 100644 index 000000000..f82095ba2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go @@ -0,0 +1,41 @@ +package ini + +import ( + "fmt" +) + +var ( + emptyRunes = []rune{} +) + +func isSep(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '[', ']': + return true + default: + return false + } +} + +var ( + openBrace = []rune("[") + closeBrace = []rune("]") +) + +func newSepToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '[': + tok = newToken(TokenSep, openBrace, NoneType) + case ']': + tok = newToken(TokenSep, closeBrace, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go new file mode 100644 index 000000000..da7a4049c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go @@ -0,0 +1,45 @@ +package ini + +// skipper is used to skip certain blocks of an ini file. +// Currently skipper is used to skip nested blocks of ini +// files. See example below +// +// [ foo ] +// nested = ; this section will be skipped +// a=b +// c=d +// bar=baz ; this will be included +type skipper struct { + shouldSkip bool + TokenSet bool + prevTok Token +} + +func newSkipper() skipper { + return skipper{ + prevTok: emptyToken, + } +} + +func (s *skipper) ShouldSkip(tok Token) bool { + // should skip state will be modified only if previous token was new line (NL); + // and the current token is not WhiteSpace (WS). + if s.shouldSkip && + s.prevTok.Type() == TokenNL && + tok.Type() != TokenWS { + s.Continue() + return false + } + s.prevTok = tok + return s.shouldSkip +} + +func (s *skipper) Skip() { + s.shouldSkip = true +} + +func (s *skipper) Continue() { + s.shouldSkip = false + // empty token is assigned as we return to default state, when should skip is false + s.prevTok = emptyToken +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go new file mode 100644 index 000000000..18f3fe893 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go @@ -0,0 +1,35 @@ +package ini + +// Statement is an empty AST mostly used for transitioning states. 
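+// For example (illustrative), the parser pushes one when it encounters '['
+// so that the upcoming section label tokens have a node to attach to.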
+func newStatement() AST { + return newAST(ASTKindStatement, AST{}) +} + +// SectionStatement represents a section AST +func newSectionStatement(tok Token) AST { + return newASTWithRootToken(ASTKindSectionStatement, tok) +} + +// ExprStatement represents a completed expression AST +func newExprStatement(ast AST) AST { + return newAST(ASTKindExprStatement, ast) +} + +// CommentStatement represents a comment in the ini definition. +// +// grammar: +// comment -> #comment' | ;comment' +// comment' -> epsilon | value +func newCommentStatement(tok Token) AST { + return newAST(ASTKindCommentStatement, newExpression(tok)) +} + +// CompletedSectionStatement represents a completed section +func newCompletedSectionStatement(ast AST) AST { + return newAST(ASTKindCompletedSectionStatement, ast) +} + +// SkipStatement is used to skip whole statements +func newSkipStatement(ast AST) AST { + return newAST(ASTKindSkipStatement, ast) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go new file mode 100644 index 000000000..b5480fdeb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go @@ -0,0 +1,284 @@ +package ini + +import ( + "fmt" +) + +// getStringValue will return a quoted string and the amount +// of bytes read +// +// an error will be returned if the string is not properly formatted +func getStringValue(b []rune) (int, error) { + if b[0] != '"' { + return 0, NewParseError("strings must start with '\"'") + } + + endQuote := false + i := 1 + + for ; i < len(b) && !endQuote; i++ { + if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { + endQuote = true + break + } else if escaped { + /*c, err := getEscapedByte(b[i]) + if err != nil { + return 0, err + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) 
+ i--*/ + + continue + } + } + + if !endQuote { + return 0, NewParseError("missing '\"' in string value") + } + + return i + 1, nil +} + +// getBoolValue will return a boolean and the amount +// of bytes read +// +// an error will be returned if the boolean is not of a correct +// value +func getBoolValue(b []rune) (int, error) { + if len(b) < 4 { + return 0, NewParseError("invalid boolean value") + } + + n := 0 + for _, lv := range literalValues { + if len(lv) > len(b) { + continue + } + + if isCaselessLitValue(lv, b) { + n = len(lv) + } + } + + if n == 0 { + return 0, NewParseError("invalid boolean value") + } + + return n, nil +} + +// getNumericalValue will return a numerical string, the amount +// of bytes read, and the base of the number +// +// an error will be returned if the number is not of a correct +// value +func getNumericalValue(b []rune) (int, int, error) { + if !isDigit(b[0]) { + return 0, 0, NewParseError("invalid digit value") + } + + i := 0 + helper := numberHelper{} + +loop: + for negativeIndex := 0; i < len(b); i++ { + negativeIndex++ + + if !isDigit(b[i]) { + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return 0, 0, NewParseError("parse error '-'") + } + + n := getNegativeNumber(b[i:]) + i += (n - 1) + helper.Determine(b[i]) + continue + case '.': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + + negativeIndex = 0 + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + if i == 0 && b[i] != '0' { + return 0, 0, NewParseError("incorrect base format, expected leading '0'") + } + + if i != 1 { + return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) + } + + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + default: + if isWhitespace(b[i]) { + break loop + } + + if isNewline(b[i:]) { + break loop + } + + if !(helper.numberFormat == hex && isHexByte(b[i])) { + if i+2 < len(b) && !isNewline(b[i:i+2]) { + return 0, 0, NewParseError("invalid numerical character") + } else if !isNewline([]rune{b[i]}) { + return 0, 0, NewParseError("invalid numerical character") + } + + break loop + } + } + } + } + + return helper.Base(), i, nil +} + +// isDigit will return whether or not something is an integer +func isDigit(b rune) bool { + return b >= '0' && b <= '9' +} + +func hasExponent(v []rune) bool { + return contains(v, 'e') || contains(v, 'E') +} + +func isBinaryByte(b rune) bool { + switch b { + case '0', '1': + return true + default: + return false + } +} + +func isOctalByte(b rune) bool { + switch b { + case '0', '1', '2', '3', '4', '5', '6', '7': + return true + default: + return false + } +} + +func isHexByte(b rune) bool { + if isDigit(b) { + return true + } + return (b >= 'A' && b <= 'F') || + (b >= 'a' && b <= 'f') +} + +func getValue(b []rune) (int, error) { + i := 0 + + for i < len(b) { + if isNewline(b[i:]) { + break + } + + if isOp(b[i:]) { + break + } + + valid, n, err := isValid(b[i:]) + if err != nil { + return 0, err + } + + if !valid { + break + } + + i += n + } + + return i, nil +} + +// getNegativeNumber will return a negative number from a +// byte slice. This will iterate through all characters until +// a non-digit has been found. 
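+//
+// For example (illustrative), getNegativeNumber([]rune("-123e4")) returns
+// 4, the length of the leading "-123" run.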
+func getNegativeNumber(b []rune) int {
+	if b[0] != '-' {
+		return 0
+	}
+
+	i := 1
+	for ; i < len(b); i++ {
+		if !isDigit(b[i]) {
+			return i
+		}
+	}
+
+	return i
+}
+
+// isEscaped will return whether or not the character is an escaped
+// character.
+func isEscaped(value []rune, b rune) bool {
+	if len(value) == 0 {
+		return false
+	}
+
+	switch b {
+	case '\'': // single quote
+	case '"': // quote
+	case 'n': // newline
+	case 't': // tab
+	case '\\': // backslash
+	default:
+		return false
+	}
+
+	return value[len(value)-1] == '\\'
+}
+
+func getEscapedByte(b rune) (rune, error) {
+	switch b {
+	case '\'': // single quote
+		return '\'', nil
+	case '"': // quote
+		return '"', nil
+	case 'n': // newline
+		return '\n', nil
+	case 't': // tab
+		return '\t', nil
+	case '\\': // backslash
+		return '\\', nil
+	default:
+		return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b))
+	}
+}
+
+func removeEscapedCharacters(b []rune) []rune {
+	for i := 0; i < len(b); i++ {
+		if isEscaped(b[:i], b[i]) {
+			c, err := getEscapedByte(b[i])
+			if err != nil {
+				return b
+			}
+
+			b[i-1] = c
+			b = append(b[:i], b[i+1:]...)
+			i--
+		}
+	}
+
+	return b
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
new file mode 100644
index 000000000..1d08e138a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
@@ -0,0 +1,169 @@
+package ini
+
+import (
+	"fmt"
+	"sort"
+)
+
+// Visitor is an interface used by walkers that will
+// traverse an array of ASTs.
+type Visitor interface {
+	VisitExpr(AST) error
+	VisitStatement(AST) error
+}
+
+// DefaultVisitor is used to visit statements and expressions
+// and ensure that they are both of the correct format.
+// In addition, upon visiting this will build sections and populate
+// the Sections field which can be used to retrieve profile
+// configuration.
+type DefaultVisitor struct {
+	scope    string
+	Sections Sections
+}
+
+// NewDefaultVisitor returns a DefaultVisitor
+func NewDefaultVisitor() *DefaultVisitor {
+	return &DefaultVisitor{
+		Sections: Sections{
+			container: map[string]Section{},
+		},
+	}
+}
+
+// VisitExpr visits expressions and stores the parsed key/value pair in the current scope's section.
+func (v *DefaultVisitor) VisitExpr(expr AST) error {
+	t := v.Sections.container[v.scope]
+	if t.values == nil {
+		t.values = values{}
+	}
+
+	switch expr.Kind {
+	case ASTKindExprStatement:
+		opExpr := expr.GetRoot()
+		switch opExpr.Kind {
+		case ASTKindEqualExpr:
+			children := opExpr.GetChildren()
+			if len(children) <= 1 {
+				return NewParseError("unexpected token type")
+			}
+
+			rhs := children[1]
+
+			// The right-hand value side of the equality expression is allowed to contain '[', ']', ':', '=' in the values.
+			// If the token is not either a literal or one of the token types that identifies those four additional
+			// tokens then error.
+			if !(rhs.Root.Type() == TokenLit || rhs.Root.Type() == TokenOp || rhs.Root.Type() == TokenSep) {
+				return NewParseError("unexpected token type")
+			}
+
+			key := EqualExprKey(opExpr)
+			v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
+			if err != nil {
+				return err
+			}
+
+			t.values[key] = v
+		default:
+			return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+		}
+	default:
+		return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+	}
+
+	v.Sections.container[v.scope] = t
+	return nil
+}
+
+// VisitStatement visits statements, building sections from completed
+// section statements and updating the visitor's current scope.
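+// For example (illustrative), visiting the completed section statement for
+// "[default]" creates an empty "default" section and scopes subsequent
+// expressions to it.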
+func (v *DefaultVisitor) VisitStatement(stmt AST) error {
+	switch stmt.Kind {
+	case ASTKindCompletedSectionStatement:
+		child := stmt.GetRoot()
+		if child.Kind != ASTKindSectionStatement {
+			return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
+		}
+
+		name := string(child.Root.Raw())
+		v.Sections.container[name] = Section{}
+		v.scope = name
+	default:
+		return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
+	}
+
+	return nil
+}
+
+// Sections is a map of Section structures that represent
+// a configuration.
+type Sections struct {
+	container map[string]Section
+}
+
+// GetSection will return section p. If section p does not exist,
+// false will be returned in the second parameter.
+func (t Sections) GetSection(p string) (Section, bool) {
+	v, ok := t.container[p]
+	return v, ok
+}
+
+// values represents a map of union values.
+type values map[string]Value
+
+// List will return a list of all sections that were successfully
+// parsed.
+func (t Sections) List() []string {
+	keys := make([]string, len(t.container))
+	i := 0
+	for k := range t.container {
+		keys[i] = k
+		i++
+	}
+
+	sort.Strings(keys)
+	return keys
+}
+
+// Section contains a name and values. This represents
+// a sectioned entry in a configuration file.
+type Section struct {
+	Name   string
+	values values
+}
+
+// Has will return whether or not an entry exists in a given section
+func (t Section) Has(k string) bool {
+	_, ok := t.values[k]
+	return ok
+}
+
+// ValueType will return what type the union is set to. If
+// k was not found, the NoneType will be returned.
+func (t Section) ValueType(k string) (ValueType, bool) {
+	v, ok := t.values[k]
+	return v.Type, ok
+}
+
+// Bool returns a bool value at k
+func (t Section) Bool(k string) (bool, bool) {
+	return t.values[k].BoolValue()
+}
+
+// Int returns an integer value at k
+func (t Section) Int(k string) (int64, bool) {
+	return t.values[k].IntValue()
+}
+
+// Float64 returns a float value at k
+func (t Section) Float64(k string) (float64, bool) {
+	return t.values[k].FloatValue()
+}
+
+// String returns the string value at k
+func (t Section) String(k string) string {
+	_, ok := t.values[k]
+	if !ok {
+		return ""
+	}
+	return t.values[k].StringValue()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
new file mode 100644
index 000000000..99915f7f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
@@ -0,0 +1,25 @@
+package ini
+
+// Walk will traverse the AST using v, the Visitor.
+func Walk(tree []AST, v Visitor) error {
+	for _, node := range tree {
+		switch node.Kind {
+		case ASTKindExpr,
+			ASTKindExprStatement:
+
+			if err := v.VisitExpr(node); err != nil {
+				return err
+			}
+		case ASTKindStatement,
+			ASTKindCompletedSectionStatement,
+			ASTKindNestedSectionStatement,
+			ASTKindCompletedNestedSectionStatement:
+
+			if err := v.VisitStatement(node); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
new file mode 100644
index 000000000..7ffb4ae06
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
@@ -0,0 +1,24 @@
+package ini
+
+import (
+	"unicode"
+)
+
+// isWhitespace will return whether or not the character is
+// a whitespace character.
+//
+// Whitespace is defined as any Unicode space character other than
+// '\n' or '\r' (for example, a space or a tab).
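+//
+// For example (illustrative), ' ' and '\t' are whitespace here, while '\n'
+// is not, so newlines can be tokenized separately as TokenNL.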
+func isWhitespace(c rune) bool {
+	return unicode.IsSpace(c) && c != '\n' && c != '\r'
+}
+
+func newWSToken(b []rune) (Token, int, error) {
+	i := 0
+	for ; i < len(b); i++ {
+		if !isWhitespace(b[i]) {
+			break
+		}
+	}
+
+	return newToken(TokenWS, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go
new file mode 100644
index 000000000..bf18031a3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go
@@ -0,0 +1,50 @@
+package arn
+
+import (
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/arn"
+)
+
+// AccessPointARN provides a representation of an S3 access point ARN.
+type AccessPointARN struct {
+	arn.ARN
+	AccessPointName string
+}
+
+// GetARN returns the base ARN for the Access Point resource
+func (a AccessPointARN) GetARN() arn.ARN {
+	return a.ARN
+}
+
+// ParseAccessPointResource attempts to parse the ARN's resource as an
+// AccessPoint resource.
+//
+// Supported Access point resource format:
+// - Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName}
+// - example: arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint
+//
+func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) {
+	if len(a.Region) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "region not set"}
+	}
+	if len(a.AccountID) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"}
+	}
+	if len(resParts) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
+	}
+	if len(resParts) > 1 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
+	}
+
+	resID := resParts[0]
+	if len(strings.TrimSpace(resID)) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
+	}
+
+	return AccessPointARN{
+		ARN:             a,
+		AccessPointName: resID,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go
new file mode 100644
index 000000000..216c4baab
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go
@@ -0,0 +1,94 @@
+package arn
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/arn"
+)
+
+var supportedServiceARN = []string{
+	"s3",
+	"s3-outposts",
+	"s3-object-lambda",
+}
+
+func isSupportedServiceARN(service string) bool {
+	for _, name := range supportedServiceARN {
+		if name == service {
+			return true
+		}
+	}
+	return false
+}
+
+// Resource provides the interfaces abstracting ARNs of specific resource
+// types.
+type Resource interface {
+	GetARN() arn.ARN
+	String() string
+}
+
+// ResourceParser provides the function for parsing an ARN's resource
+// component into a typed resource.
+type ResourceParser func(arn.ARN) (Resource, error)
+
+// ParseResource parses an AWS ARN into a typed resource for the S3 API.
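+//
+// For example (illustrative), parsing
+// "arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint" with an
+// access-point ResourceParser would yield an AccessPointARN value.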
+func ParseResource(s string, resParser ResourceParser) (resARN Resource, err error) { + a, err := arn.Parse(s) + if err != nil { + return nil, err + } + + if len(a.Partition) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "partition not set"} + } + + if !isSupportedServiceARN(a.Service) { + return nil, InvalidARNError{ARN: a, Reason: "service is not supported"} + } + + if strings.HasPrefix(a.Region, "fips-") || strings.HasSuffix(a.Region, "-fips") { + return nil, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"} + } + + if len(a.Resource) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "resource not set"} + } + + return resParser(a) +} + +// SplitResource splits the resource components by the ARN resource delimiters. +func SplitResource(v string) []string { + var parts []string + var offset int + + for offset <= len(v) { + idx := strings.IndexAny(v[offset:], "/:") + if idx < 0 { + parts = append(parts, v[offset:]) + break + } + parts = append(parts, v[offset:idx+offset]) + offset += idx + 1 + } + + return parts +} + +// IsARN returns whether the given string is an ARN +func IsARN(s string) bool { + return arn.IsARN(s) +} + +// InvalidARNError provides the error for an invalid ARN error. +type InvalidARNError struct { + ARN arn.ARN + Reason string +} + +// Error returns a string denoting the occurred InvalidARNError +func (e InvalidARNError) Error() string { + return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go new file mode 100644 index 000000000..1e10f8de0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go @@ -0,0 +1,126 @@ +package arn + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/arn" +) + +// OutpostARN interface that should be satisfied by outpost ARNs +type OutpostARN interface { + Resource + GetOutpostID() string +} + +// ParseOutpostARNResource will parse a provided ARNs resource using the appropriate ARN format +// and return a specific OutpostARN type +// +// Currently supported outpost ARN formats: +// * Outpost AccessPoint ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint +// +// * Outpost Bucket ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket +// +// Other outpost ARN formats may be supported and added in the future. +// +func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) { + if len(a.Region) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "region not set"} + } + + if len(a.AccountID) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "account-id not set"} + } + + // verify if outpost id is present and valid + if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + // verify possible resource type exists + if len(resParts) < 3 { + return nil, InvalidARNError{ + ARN: a, Reason: "incomplete outpost resource type. 
Expected bucket or access-point resource to be present", + } + } + + // Since we know this is a OutpostARN fetch outpostID + outpostID := strings.TrimSpace(resParts[0]) + + switch resParts[1] { + case "accesspoint": + accesspointARN, err := ParseAccessPointResource(a, resParts[2:]) + if err != nil { + return OutpostAccessPointARN{}, err + } + return OutpostAccessPointARN{ + AccessPointARN: accesspointARN, + OutpostID: outpostID, + }, nil + + case "bucket": + bucketName, err := parseBucketResource(a, resParts[2:]) + if err != nil { + return nil, err + } + return OutpostBucketARN{ + ARN: a, + BucketName: bucketName, + OutpostID: outpostID, + }, nil + + default: + return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"} + } +} + +// OutpostAccessPointARN represents outpost access point ARN. +type OutpostAccessPointARN struct { + AccessPointARN + OutpostID string +} + +// GetOutpostID returns the outpost id of outpost access point arn +func (o OutpostAccessPointARN) GetOutpostID() string { + return o.OutpostID +} + +// OutpostBucketARN represents the outpost bucket ARN. +type OutpostBucketARN struct { + arn.ARN + BucketName string + OutpostID string +} + +// GetOutpostID returns the outpost id of outpost bucket arn +func (o OutpostBucketARN) GetOutpostID() string { + return o.OutpostID +} + +// GetARN retrives the base ARN from outpost bucket ARN resource +func (o OutpostBucketARN) GetARN() arn.ARN { + return o.ARN +} + +// parseBucketResource attempts to parse the ARN's bucket resource and retrieve the +// bucket resource id. +// +// parseBucketResource only parses the bucket resource id. +// +func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) { + if len(resParts) == 0 { + return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} + } + if len(resParts) > 1 { + return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"} + } + + bucketName = strings.TrimSpace(resParts[0]) + if len(bucketName) == 0 { + return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} + } + return bucketName, err +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go new file mode 100644 index 000000000..513154cc0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go @@ -0,0 +1,15 @@ +package arn + +// S3ObjectLambdaARN represents an ARN for the s3-object-lambda service +type S3ObjectLambdaARN interface { + Resource + + isS3ObjectLambdasARN() +} + +// S3ObjectLambdaAccessPointARN is an S3ObjectLambdaARN for the Access Point resource type +type S3ObjectLambdaAccessPointARN struct { + AccessPointARN +} + +func (s S3ObjectLambdaAccessPointARN) isS3ObjectLambdasARN() {} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go new file mode 100644 index 000000000..4290ff676 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go @@ -0,0 +1,202 @@ +package s3shared + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" +) + +const ( + invalidARNErrorErrCode = "InvalidARNError" + configurationErrorErrCode = "ConfigurationError" +) + +// InvalidARNError denotes the error for Invalid ARN +type InvalidARNError struct { + message string + resource 
arn.Resource + origErr error +} + +// Error returns the InvalidARNError +func (e InvalidARNError) Error() string { + var extra string + if e.resource != nil { + extra = "ARN: " + e.resource.String() + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +// Code returns the invalid ARN error code +func (e InvalidARNError) Code() string { + return invalidARNErrorErrCode +} + +// Message returns the message for Invalid ARN error +func (e InvalidARNError) Message() string { + return e.message +} + +// OrigErr is the original error wrapped by Invalid ARN Error +func (e InvalidARNError) OrigErr() error { + return e.origErr +} + +// NewInvalidARNError denotes invalid arn error +func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "invalid ARN", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithCustomEndpointError ARN not supported for custom clients endpoints +func NewInvalidARNWithCustomEndpointError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported with custom client endpoints", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithUnsupportedPartitionError ARN not supported for the target partition +func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported for the target ARN partition", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithFIPSError ARN not supported for FIPS region +// +// Deprecated: FIPS will not appear in the ARN region component. +func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported for FIPS region", + resource: resource, + origErr: err, + } +} + +// ConfigurationError is used to denote a client configuration error +type ConfigurationError struct { + message string + resource arn.Resource + clientPartitionID string + clientRegion string + origErr error +} + +// Error returns the Configuration error string +func (e ConfigurationError) Error() string { + extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s", + e.resource, e.clientPartitionID, e.clientRegion) + + return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +// Code returns configuration error's error-code +func (e ConfigurationError) Code() string { + return configurationErrorErrCode +} + +// Message returns the configuration error message +func (e ConfigurationError) Message() string { + return e.message +} + +// OrigErr is the original error wrapped by Configuration Error +func (e ConfigurationError) OrigErr() error { + return e.origErr +} + +// NewClientPartitionMismatchError stub +func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client partition does not match provided ARN partition", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientRegionMismatchError denotes cross region access error +func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client region does not match provided ARN region", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + 
clientRegion: clientRegion, + } +} + +// NewFailedToResolveEndpointError denotes endpoint resolving error +func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "endpoint resolver failed to find an endpoint for the provided ARN region", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForFIPSError denotes client config error for unsupported cross region FIPS access +func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for fips but cross-region resource ARN provided", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewFIPSConfigurationError denotes a configuration error when a client or request is configured for FIPS +func NewFIPSConfigurationError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "use of ARN is not supported when client or request is configured for FIPS", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForAccelerateError denotes client config error for unsupported S3 accelerate +func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for S3 Accelerate but is not supported with resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForCrossRegionFIPSError denotes client config error for unsupported cross region FIPS request +func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForDualStackError denotes client config error for unsupported S3 Dual-stack +func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for S3 Dual-stack but is not supported with resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go new file mode 100644 index 000000000..ef43d6c58 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go @@ -0,0 +1,45 @@ +package s3shared + +import ( + "github.com/aws/aws-sdk-go/aws" + awsarn "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" +) + +// ResourceRequest represents the request and arn resource +type ResourceRequest struct { + Resource arn.Resource + Request *request.Request +} + +// ARN returns 
the resource ARN
+func (r ResourceRequest) ARN() awsarn.ARN {
+	return r.Resource.GetARN()
+}
+
+// AllowCrossRegion returns a bool value to denote if S3UseARNRegion flag is set
+func (r ResourceRequest) AllowCrossRegion() bool {
+	return aws.BoolValue(r.Request.Config.S3UseARNRegion)
+}
+
+// IsCrossPartition returns true if the client is configured for a different
+// partition than the one the resource ARN region resolves to.
+func (r ResourceRequest) IsCrossPartition() bool {
+	return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition
+}
+
+// IsCrossRegion returns true if ARN region is different than client configured region
+func (r ResourceRequest) IsCrossRegion() bool {
+	return IsCrossRegion(r.Request, r.Resource.GetARN().Region)
+}
+
+// HasCustomEndpoint returns true if custom client endpoint is provided
+func (r ResourceRequest) HasCustomEndpoint() bool {
+	return len(aws.StringValue(r.Request.Config.Endpoint)) > 0
+}
+
+// IsCrossRegion returns true if request signing region is not same as configured region
+func IsCrossRegion(req *request.Request, otherRegion string) bool {
+	return req.ClientInfo.SigningRegion != otherRegion
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go
new file mode 100644
index 000000000..0b9b0dfce
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go
@@ -0,0 +1,57 @@
+package s3err
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RequestFailure provides additional S3 specific metadata for the request
+// failure.
+type RequestFailure struct {
+	awserr.RequestFailure
+
+	hostID string
+}
+
+// NewRequestFailure returns a request failure error decorated with S3
+// specific metadata.
+func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure {
+	return &RequestFailure{RequestFailure: err, hostID: hostID}
+}
+
+func (r RequestFailure) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
+		r.StatusCode(), r.RequestID(), r.hostID)
+	return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+func (r RequestFailure) String() string {
+	return r.Error()
+}
+
+// HostID returns the HostID request response value.
+func (r RequestFailure) HostID() string {
+	return r.hostID
+}
+
+// RequestFailureWrapperHandler returns a handler to wrap an
+// awserr.RequestFailure with the S3 host ID (X-Amz-Id-2) from the response.
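+//
+// A hypothetical registration, for illustration only:
+//
+//	svc.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler())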
+func RequestFailureWrapperHandler() request.NamedHandler {
+	return request.NamedHandler{
+		Name: "awssdk.s3.errorHandler",
+		Fn: func(req *request.Request) {
+			reqErr, ok := req.Error.(awserr.RequestFailure)
+			if !ok || reqErr == nil {
+				return
+			}
+
+			hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2")
+			if req.Error == nil {
+				return
+			}
+
+			req.Error = NewRequestFailure(reqErr, hostID)
+		},
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
new file mode 100644
index 000000000..6c443988b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
@@ -0,0 +1,12 @@
+package sdkio
+
+const (
+	// Byte is 8 bits
+	Byte int64 = 1
+	// KibiByte (KiB) is 1024 Bytes
+	KibiByte = Byte * 1024
+	// MebiByte (MiB) is 1024 KiB
+	MebiByte = KibiByte * 1024
+	// GibiByte (GiB) is 1024 MiB
+	GibiByte = MebiByte * 1024
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
new file mode 100644
index 000000000..037a998c4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
@@ -0,0 +1,11 @@
+//go:build !go1.7
+// +build !go1.7
+
+package sdkio
+
+// Copy of Go 1.7 io package's Seeker constants.
+const (
+	SeekStart   = 0 // seek relative to the origin of the file
+	SeekCurrent = 1 // seek relative to the current offset
+	SeekEnd     = 2 // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
new file mode 100644
index 000000000..65e7c60c4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
@@ -0,0 +1,13 @@
+//go:build go1.7
+// +build go1.7
+
+package sdkio
+
+import "io"
+
+// Alias for Go 1.7 io package Seeker constants
+const (
+	SeekStart   = io.SeekStart   // seek relative to the origin of the file
+	SeekCurrent = io.SeekCurrent // seek relative to the current offset
+	SeekEnd     = io.SeekEnd     // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
new file mode 100644
index 000000000..a84528783
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
@@ -0,0 +1,16 @@
+//go:build go1.10
+// +build go1.10
+
+package sdkmath
+
+import "math"
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+//	Round(±0) = ±0
+//	Round(±Inf) = ±Inf
+//	Round(NaN) = NaN
+func Round(x float64) float64 {
+	return math.Round(x)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
new file mode 100644
index 000000000..a3ae3e5db
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
@@ -0,0 +1,57 @@
+//go:build !go1.10
+// +build !go1.10
+
+package sdkmath
+
+import "math"
+
+// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
+// Go version prior to Go 1.10.
+const (
+	uvone    = 0x3FF0000000000000
+	mask     = 0x7FF
+	shift    = 64 - 11 - 1
+	bias     = 1023
+	signMask = 1 << 63
+	fracMask = 1<<shift - 1
+)
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+//	Round(±0) = ±0
+//	Round(±Inf) = ±Inf
+//	Round(NaN) = NaN
+func Round(x float64) float64 {
+	// Round is a faster implementation of:
+	//
+	// func Round(x float64) float64 {
+	//   t := Trunc(x)
+	//   if Abs(x-t) >= 0.5 {
+	//     return t + Copysign(1, x)
+	//   }
+	//   return t
+	// }
+	bits := math.Float64bits(x)
+	e := uint(bits>>shift) & mask
+	if e < bias {
+		// Round abs(x) < 1 including denormals.
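+		// (illustrative note) e < bias means |x| < 1: values below 0.5
+		// collapse to +-0, while 0.5 <= |x| < 1 (e == bias-1) becomes +-1.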
+		bits &= signMask // +-0
+		if e == bias-1 {
+			bits |= uvone // +-1
+		}
+	} else if e < bias+shift {
+		// Round any abs(x) >= 1 containing a fractional component [0,1).
+		//
+		// Numbers with larger exponents are returned unchanged since they
+		// must be either an integer, infinity, or NaN.
+		const half = 1 << (shift - 1)
+		e -= bias
+		bits += half >> e
+		bits &^= fracMask >> e
+	}
+	return math.Float64frombits(bits)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
new file mode 100644
index 000000000..0c9802d87
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
@@ -0,0 +1,29 @@
+package sdkrand
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+)
+
+// lockedSource is a thread-safe implementation of rand.Source
+type lockedSource struct {
+	lk  sync.Mutex
+	src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+	r.lk.Lock()
+	n = r.src.Int63()
+	r.lk.Unlock()
+	return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
+}
+
+// SeededRand is a new RNG using a thread safe implementation of rand.Source
+var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go
new file mode 100644
index 000000000..4bae66cee
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go
@@ -0,0 +1,12 @@
+//go:build go1.6
+// +build go1.6
+
+package sdkrand
+
+import "math/rand"
+
+// Read provides the stub for the math.Rand.Read method support for Go
+// versions 1.6 and greater.
+func Read(r *rand.Rand, p []byte) (int, error) {
+	return r.Read(p)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go
new file mode 100644
index 000000000..3a6ab8825
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go
@@ -0,0 +1,25 @@
+//go:build !go1.6
+// +build !go1.6
+
+package sdkrand
+
+import "math/rand"
+
+// Read backfills Go 1.6's math.Rand.Read for Go 1.5
+func Read(r *rand.Rand, p []byte) (n int, err error) {
+	// Copy of the Go standard library's math package read function, not
+	// added to the standard library until Go 1.6.
+	var pos int8
+	var val int64
+	for n = 0; n < len(p); n++ {
+		if pos == 0 {
+			val = r.Int63()
+			pos = 7
+		}
+		p[n] = byte(val)
+		val >>= 8
+		pos--
+	}
+
+	return n, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
new file mode 100644
index 000000000..38ea61afe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
@@ -0,0 +1,23 @@
+package sdkuri
+
+import (
+	"path"
+	"strings"
+)
+
+// PathJoin will join the elements of the path delimited by the "/"
+// character. Similar to path.Join with the exception the trailing "/"
+// character is preserved if present.
+func PathJoin(elems ...string) string {
+	if len(elems) == 0 {
+		return ""
+	}
+
+	hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
+	str := path.Join(elems...)
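+	// (illustrative note) path.Join applies path.Clean, which drops any
+	// trailing "/", so the separator is restored below when the final
+	// element carried one.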
+	if hasTrailing && str != "/" {
+		str += "/"
+	}
+
+	return str
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
new file mode 100644
index 000000000..7da8a49ce
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
@@ -0,0 +1,12 @@
+package shareddefaults
+
+const (
+	// ECSCredsProviderEnvVar is an environment variable key used to
+	// determine which path needs to be hit.
+	ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+)
+
+// ECSContainerCredentialsURI is the endpoint to retrieve container
+// credentials. This can be overridden in tests to ensure the credential
+// process is behaving correctly.
+var ECSContainerCredentialsURI = "http://169.254.170.2"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
new file mode 100644
index 000000000..34fea49ca
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
@@ -0,0 +1,46 @@
+package shareddefaults
+
+import (
+	"os/user"
+	"path/filepath"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+	return filepath.Join(UserHomeDir(), ".aws", "credentials")
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+	return filepath.Join(UserHomeDir(), ".aws", "config")
+}
+
+// UserHomeDir returns the home directory for the user the process is
+// running under.
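+//
+// It first consults the platform-specific lookup (environment variables, or
+// os.UserHomeDir on Go 1.12 and later) and falls back to the current user's
+// HomeDir from os/user when that lookup returns an empty string.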
+func UserHomeDir() string { + var home string + + home = userHomeDir() + if len(home) > 0 { + return home + } + + currUser, _ := user.Current() + if currUser != nil { + home = currUser.HomeDir + } + + return home +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go new file mode 100644 index 000000000..eb298ae0f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go @@ -0,0 +1,18 @@ +//go:build !go1.12 +// +build !go1.12 + +package shareddefaults + +import ( + "os" + "runtime" +) + +func userHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go new file mode 100644 index 000000000..51541b508 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go @@ -0,0 +1,13 @@ +//go:build go1.12 +// +build go1.12 + +package shareddefaults + +import ( + "os" +) + +func userHomeDir() string { + home, _ := os.UserHomeDir() + return home +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go new file mode 100644 index 000000000..d008ae27c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go @@ -0,0 +1,11 @@ +package strings + +import ( + "strings" +) + +// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, +// under Unicode case-folding. +func HasPrefixFold(s, prefix string) bool { + return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go new file mode 100644 index 000000000..14ad0c589 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. 
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + if !c.forgotten { + delete(g.m, key) + } + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go new file mode 100644 index 000000000..e045f38d8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go @@ -0,0 +1,53 @@ +package checksum + +import ( + "crypto/md5" + "encoding/base64" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const contentMD5Header = "Content-Md5" + +// AddBodyContentMD5Handler computes and sets the HTTP Content-MD5 header for requests that +// require it. +func AddBodyContentMD5Handler(r *request.Request) { + // if Content-MD5 header is already present, return + if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) != 0 { + return + } + + // if S3DisableContentMD5Validation flag is set, return + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + + // if request is presigned, return + if r.IsPresigned() { + return + } + + // if body is not seekable, return + if !aws.IsReaderSeekable(r.Body) { + if r.Config.Logger != nil { + r.Config.Logger.Log(fmt.Sprintf( + "Unable to compute Content-MD5 for unseekable body, S3.%s", + r.Operation.Name)) + } + return + } + + h := md5.New() + + if _, err := aws.CopySeekableBody(h, r.Body); err != nil { + r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) + return + } + + // encode the md5 checksum in base64 and set the request header. 
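+	// For example (known value), an empty body produces
+	// "1B2M2Y8AsgTpgAmY7PhCfg==", the base64 encoding of the 16-byte
+	// MD5 digest of zero bytes.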
+ v := base64.StdEncoding.EncodeToString(h.Sum(nil)) + r.HTTPRequest.Header.Set(contentMD5Header, v) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go new file mode 100644 index 000000000..151054971 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go @@ -0,0 +1,144 @@ +package eventstream + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "strconv" +) + +type decodedMessage struct { + rawMessage + Headers decodedHeaders `json:"headers"` +} +type jsonMessage struct { + Length json.Number `json:"total_length"` + HeadersLen json.Number `json:"headers_length"` + PreludeCRC json.Number `json:"prelude_crc"` + Headers decodedHeaders `json:"headers"` + Payload []byte `json:"payload"` + CRC json.Number `json:"message_crc"` +} + +func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { + var jsonMsg jsonMessage + if err = json.Unmarshal(b, &jsonMsg); err != nil { + return err + } + + d.Length, err = numAsUint32(jsonMsg.Length) + if err != nil { + return err + } + d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) + if err != nil { + return err + } + d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) + if err != nil { + return err + } + d.Headers = jsonMsg.Headers + d.Payload = jsonMsg.Payload + d.CRC, err = numAsUint32(jsonMsg.CRC) + if err != nil { + return err + } + + return nil +} + +func (d *decodedMessage) MarshalJSON() ([]byte, error) { + jsonMsg := jsonMessage{ + Length: json.Number(strconv.Itoa(int(d.Length))), + HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), + PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), + Headers: d.Headers, + Payload: d.Payload, + CRC: json.Number(strconv.Itoa(int(d.CRC))), + } + + return json.Marshal(jsonMsg) +} + +func numAsUint32(n json.Number) (uint32, error) { + v, err := n.Int64() + if err != nil { + return 0, fmt.Errorf("failed to get int64 json number, %v", err) + } + + return uint32(v), nil +} + +func (d decodedMessage) Message() Message { + return Message{ + Headers: Headers(d.Headers), + Payload: d.Payload, + } +} + +type decodedHeaders Headers + +func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { + var jsonHeaders []struct { + Name string `json:"name"` + Type valueType `json:"type"` + Value interface{} `json:"value"` + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + decoder.UseNumber() + if err := decoder.Decode(&jsonHeaders); err != nil { + return err + } + + var headers Headers + for _, h := range jsonHeaders { + value, err := valueFromType(h.Type, h.Value) + if err != nil { + return err + } + headers.Set(h.Name, value) + } + *hs = decodedHeaders(headers) + + return nil +} + +func valueFromType(typ valueType, val interface{}) (Value, error) { + switch typ { + case trueValueType: + return BoolValue(true), nil + case falseValueType: + return BoolValue(false), nil + case int8ValueType: + v, err := val.(json.Number).Int64() + return Int8Value(int8(v)), err + case int16ValueType: + v, err := val.(json.Number).Int64() + return Int16Value(int16(v)), err + case int32ValueType: + v, err := val.(json.Number).Int64() + return Int32Value(int32(v)), err + case int64ValueType: + v, err := val.(json.Number).Int64() + return Int64Value(v), err + case bytesValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return BytesValue(v), err + case stringValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return StringValue(string(v)), 
err + case timestampValueType: + v, err := val.(json.Number).Int64() + return TimestampValue(timeFromEpochMilli(v)), err + case uuidValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + var tv UUIDValue + copy(tv[:], v) + return tv, err + default: + panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go new file mode 100644 index 000000000..474339391 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go @@ -0,0 +1,216 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "hash/crc32" + "io" + + "github.com/aws/aws-sdk-go/aws" +) + +// Decoder provides decoding of an Event Stream messages. +type Decoder struct { + r io.Reader + logger aws.Logger +} + +// NewDecoder initializes and returns a Decoder for decoding event +// stream messages from the reader provided. +func NewDecoder(r io.Reader, opts ...func(*Decoder)) *Decoder { + d := &Decoder{ + r: r, + } + + for _, opt := range opts { + opt(d) + } + + return d +} + +// DecodeWithLogger adds a logger to be used by the decoder when decoding +// stream events. +func DecodeWithLogger(logger aws.Logger) func(*Decoder) { + return func(d *Decoder) { + d.logger = logger + } +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the stream. +func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { + reader := d.r + if d.logger != nil { + debugMsgBuf := bytes.NewBuffer(nil) + reader = io.TeeReader(reader, debugMsgBuf) + defer func() { + logMessageDecode(d.logger, debugMsgBuf, m, err) + }() + } + + m, err = Decode(reader, payloadBuf) + + return m, err +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the reader. 
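+//
+// A minimal usage sketch (r is a hypothetical io.Reader carrying one
+// encoded message; a nil payloadBuf is valid and allocates as needed):
+//
+//	msg, err := Decode(r, nil)
+//	if err != nil {
+//		// handle decode or checksum error
+//	}
+//	_ = msg.Headers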
+func Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) { + crc := crc32.New(crc32IEEETable) + hashReader := io.TeeReader(reader, crc) + + prelude, err := decodePrelude(hashReader, crc) + if err != nil { + return Message{}, err + } + + if prelude.HeadersLen > 0 { + lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) + m.Headers, err = decodeHeaders(lr) + if err != nil { + return Message{}, err + } + } + + if payloadLen := prelude.PayloadLen(); payloadLen > 0 { + buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) + if err != nil { + return Message{}, err + } + m.Payload = buf + } + + msgCRC := crc.Sum32() + if err := validateCRC(reader, msgCRC); err != nil { + return Message{}, err + } + + return m, nil +} + +func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Raw message:\n%s\n", + hex.Dump(msgBuf.Bytes())) + + if decodeErr != nil { + fmt.Fprintf(w, "Decode error: %v\n", decodeErr) + return + } + + rawMsg, err := msg.rawMessage() + if err != nil { + fmt.Fprintf(w, "failed to create raw message, %v\n", err) + return + } + + decodedMsg := decodedMessage{ + rawMessage: rawMsg, + Headers: decodedHeaders(msg.Headers), + } + + fmt.Fprintf(w, "Decoded message:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(decodedMsg); err != nil { + fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) + } +} + +func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { + var p messagePrelude + + var err error + p.Length, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + p.HeadersLen, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + if err := p.ValidateLens(); err != nil { + return messagePrelude{}, err + } + + preludeCRC := crc.Sum32() + if err := validateCRC(r, preludeCRC); err != nil { + return messagePrelude{}, err + } + + p.PreludeCRC = preludeCRC + + return p, nil +} + +func decodePayload(buf []byte, r io.Reader) ([]byte, error) { + w := bytes.NewBuffer(buf[0:0]) + + _, err := io.Copy(w, r) + return w.Bytes(), err +} + +func decodeUint8(r io.Reader) (uint8, error) { + type byteReader interface { + ReadByte() (byte, error) + } + + if br, ok := r.(byteReader); ok { + v, err := br.ReadByte() + return uint8(v), err + } + + var b [1]byte + _, err := io.ReadFull(r, b[:]) + return uint8(b[0]), err +} +func decodeUint16(r io.Reader) (uint16, error) { + var b [2]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(bs), nil +} +func decodeUint32(r io.Reader) (uint32, error) { + var b [4]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(bs), nil +} +func decodeUint64(r io.Reader) (uint64, error) { + var b [8]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint64(bs), nil +} + +func validateCRC(r io.Reader, expect uint32) error { + msgCRC, err := decodeUint32(r) + if err != nil { + return err + } + + if msgCRC != expect { + return ChecksumError{} + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go new file mode 100644 index 000000000..ffade3bc0 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go @@ -0,0 +1,162 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "hash/crc32" + "io" + + "github.com/aws/aws-sdk-go/aws" +) + +// Encoder provides EventStream message encoding. +type Encoder struct { + w io.Writer + logger aws.Logger + + headersBuf *bytes.Buffer +} + +// NewEncoder initializes and returns an Encoder to encode Event Stream +// messages to an io.Writer. +func NewEncoder(w io.Writer, opts ...func(*Encoder)) *Encoder { + e := &Encoder{ + w: w, + headersBuf: bytes.NewBuffer(nil), + } + + for _, opt := range opts { + opt(e) + } + + return e +} + +// EncodeWithLogger adds a logger to be used by the encode when decoding +// stream events. +func EncodeWithLogger(logger aws.Logger) func(*Encoder) { + return func(d *Encoder) { + d.logger = logger + } +} + +// Encode encodes a single EventStream message to the io.Writer the Encoder +// was created with. An error is returned if writing the message fails. +func (e *Encoder) Encode(msg Message) (err error) { + e.headersBuf.Reset() + + writer := e.w + if e.logger != nil { + encodeMsgBuf := bytes.NewBuffer(nil) + writer = io.MultiWriter(writer, encodeMsgBuf) + defer func() { + logMessageEncode(e.logger, encodeMsgBuf, msg, err) + }() + } + + if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil { + return err + } + + crc := crc32.New(crc32IEEETable) + hashWriter := io.MultiWriter(writer, crc) + + headersLen := uint32(e.headersBuf.Len()) + payloadLen := uint32(len(msg.Payload)) + + if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { + return err + } + + if headersLen > 0 { + if _, err = io.Copy(hashWriter, e.headersBuf); err != nil { + return err + } + } + + if payloadLen > 0 { + if _, err = hashWriter.Write(msg.Payload); err != nil { + return err + } + } + + msgCRC := crc.Sum32() + return binary.Write(writer, binary.BigEndian, msgCRC) +} + +func logMessageEncode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Message to encode:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(msg); err != nil { + fmt.Fprintf(w, "Failed to get encoded message, %v\n", err) + } + + if encodeErr != nil { + fmt.Fprintf(w, "Encode error: %v\n", encodeErr) + return + } + + fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes())) +} + +func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { + p := messagePrelude{ + Length: minMsgLen + headersLen + payloadLen, + HeadersLen: headersLen, + } + if err := p.ValidateLens(); err != nil { + return err + } + + err := binaryWriteFields(w, binary.BigEndian, + p.Length, + p.HeadersLen, + ) + if err != nil { + return err + } + + p.PreludeCRC = crc.Sum32() + err = binary.Write(w, binary.BigEndian, p.PreludeCRC) + if err != nil { + return err + } + + return nil +} + +// EncodeHeaders writes the header values to the writer encoded in the event +// stream format. Returns an error if a header fails to encode. 
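+//
+// A minimal sketch (hypothetical header name and value, using the Headers
+// and StringValue types from this package):
+//
+//	var buf bytes.Buffer
+//	hs := Headers{{Name: ":event-type", Value: StringValue("Stats")}}
+//	if err := EncodeHeaders(&buf, hs); err != nil {
+//		// handle encode error
+//	}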
+func EncodeHeaders(w io.Writer, headers Headers) error { + for _, h := range headers { + hn := headerName{ + Len: uint8(len(h.Name)), + } + copy(hn.Name[:hn.Len], h.Name) + if err := hn.encode(w); err != nil { + return err + } + + if err := h.Value.encode(w); err != nil { + return err + } + } + + return nil +} + +func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error { + for _, v := range vs { + if err := binary.Write(w, order, v); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go new file mode 100644 index 000000000..5481ef307 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go @@ -0,0 +1,23 @@ +package eventstream + +import "fmt" + +// LengthError provides the error for items being larger than a maximum length. +type LengthError struct { + Part string + Want int + Have int + Value interface{} +} + +func (e LengthError) Error() string { + return fmt.Sprintf("%s length invalid, %d/%d, %v", + e.Part, e.Want, e.Have, e.Value) +} + +// ChecksumError provides the error for message checksum invalidation errors. +type ChecksumError struct{} + +func (e ChecksumError) Error() string { + return "message checksum mismatch" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go new file mode 100644 index 000000000..0a63340e4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go @@ -0,0 +1,81 @@ +package eventstreamapi + +import ( + "fmt" + "sync" +) + +// InputWriterCloseErrorCode is used to denote an error occurred +// while closing the event stream input writer. +const InputWriterCloseErrorCode = "EventStreamInputWriterCloseError" + +type messageError struct { + code string + msg string +} + +func (e messageError) Code() string { + return e.code +} + +func (e messageError) Message() string { + return e.msg +} + +func (e messageError) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.msg) +} + +func (e messageError) OrigErr() error { + return nil +} + +// OnceError wraps the behavior of recording an error +// once and signal on a channel when this has occurred. +// Signaling is done by closing of the channel. +// +// Type is safe for concurrent usage. +type OnceError struct { + mu sync.RWMutex + err error + ch chan struct{} +} + +// NewOnceError return a new OnceError +func NewOnceError() *OnceError { + return &OnceError{ + ch: make(chan struct{}, 1), + } +} + +// Err acquires a read-lock and returns an +// error if one has been set. +func (e *OnceError) Err() error { + e.mu.RLock() + err := e.err + e.mu.RUnlock() + + return err +} + +// SetError acquires a write-lock and will set +// the underlying error value if one has not been set. +func (e *OnceError) SetError(err error) { + if err == nil { + return + } + + e.mu.Lock() + if e.err == nil { + e.err = err + close(e.ch) + } + e.mu.Unlock() +} + +// ErrorSet returns a channel that will be used to signal +// that an error has been set. This channel will be closed +// when the error value has been set for OnceError. 
+func (e *OnceError) ErrorSet() <-chan struct{} {
+	return e.ch
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go
new file mode 100644
index 000000000..0e4aa42f3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go
@@ -0,0 +1,173 @@
+package eventstreamapi
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
+)
+
+// Unmarshaler provides the interface for unmarshaling an EventStream
+// message into an SDK type.
+type Unmarshaler interface {
+	UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error
+}
+
+// EventReader provides reading from the EventStream of a reader.
+type EventReader struct {
+	decoder *eventstream.Decoder
+
+	unmarshalerForEventType func(string) (Unmarshaler, error)
+	payloadUnmarshaler      protocol.PayloadUnmarshaler
+
+	payloadBuf []byte
+}
+
+// NewEventReader returns an EventReader built from the reader and unmarshaler
+// provided. Use the ReadEvent method to start reading from the EventStream.
+func NewEventReader(
+	decoder *eventstream.Decoder,
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	unmarshalerForEventType func(string) (Unmarshaler, error),
+) *EventReader {
+	return &EventReader{
+		decoder:                 decoder,
+		payloadUnmarshaler:      payloadUnmarshaler,
+		unmarshalerForEventType: unmarshalerForEventType,
+		payloadBuf:              make([]byte, 10*1024),
+	}
+}
+
+// ReadEvent attempts to read a message from the EventStream and return the
+// unmarshaled event value that the message is for.
+//
+// For EventStream API errors check if the returned error satisfies the
+// awserr.Error interface to get the error's Code and Message components.
+//
+// EventUnmarshalers called with EventStream messages must take copies of the
+// message's Payload. The payload buffer is reused between events read.
+func (r *EventReader) ReadEvent() (event interface{}, err error) {
+	msg, err := r.decoder.Decode(r.payloadBuf)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// Reclaim payload buffer for next message read.
+		r.payloadBuf = msg.Payload[0:0]
+	}()
+
+	typ, err := GetHeaderString(msg, MessageTypeHeader)
+	if err != nil {
+		return nil, err
+	}
+
+	switch typ {
+	case EventMessageType:
+		return r.unmarshalEventMessage(msg)
+	case ExceptionMessageType:
+		return nil, r.unmarshalEventException(msg)
+	case ErrorMessageType:
+		return nil, r.unmarshalErrorMessage(msg)
+	default:
+		return nil, &UnknownMessageTypeError{
+			Type: typ, Message: msg.Clone(),
+		}
+	}
+}
+
+// UnknownMessageTypeError provides an error when a message is received from
+// the stream, but the reader is unable to determine what kind of message it is.
+type UnknownMessageTypeError struct { + Type string + Message eventstream.Message +} + +func (e *UnknownMessageTypeError) Error() string { + return "unknown eventstream message type, " + e.Type +} + +func (r *EventReader) unmarshalEventMessage( + msg eventstream.Message, +) (event interface{}, err error) { + eventType, err := GetHeaderString(msg, EventTypeHeader) + if err != nil { + return nil, err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return nil, err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return nil, err + } + + return ev, nil +} + +func (r *EventReader) unmarshalEventException( + msg eventstream.Message, +) (err error) { + eventType, err := GetHeaderString(msg, ExceptionTypeHeader) + if err != nil { + return err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return err + } + + var ok bool + err, ok = ev.(error) + if !ok { + err = messageError{ + code: "SerializationError", + msg: fmt.Sprintf( + "event stream exception %s mapped to non-error %T, %v", + eventType, ev, ev, + ), + } + } + + return err +} + +func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) { + var msgErr messageError + + msgErr.code, err = GetHeaderString(msg, ErrorCodeHeader) + if err != nil { + return err + } + + msgErr.msg, err = GetHeaderString(msg, ErrorMessageHeader) + if err != nil { + return err + } + + return msgErr +} + +// GetHeaderString returns the value of the header as a string. If the header +// is not set or the value is not a string an error will be returned. +func GetHeaderString(msg eventstream.Message, headerName string) (string, error) { + headerVal := msg.Headers.Get(headerName) + if headerVal == nil { + return "", fmt.Errorf("error header %s not present", headerName) + } + + v, ok := headerVal.Get().(string) + if !ok { + return "", fmt.Errorf("error header value is not a string, %T", headerVal) + } + + return v, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go new file mode 100644 index 000000000..e46b8acc2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go @@ -0,0 +1,23 @@ +package eventstreamapi + +// EventStream headers with specific meaning to async API functionality. +const ( + ChunkSignatureHeader = `:chunk-signature` // chunk signature for message + DateHeader = `:date` // Date header for signature + + // Message header and values + MessageTypeHeader = `:message-type` // Identifies type of message. + EventMessageType = `event` + ErrorMessageType = `error` + ExceptionMessageType = `exception` + + // Message Events + EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". 
+ + // Message Error + ErrorCodeHeader = `:error-code` + ErrorMessageHeader = `:error-message` + + // Message Exception + ExceptionTypeHeader = `:exception-type` +) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go new file mode 100644 index 000000000..3a7ba5cd5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go @@ -0,0 +1,123 @@ +package eventstreamapi + +import ( + "bytes" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +var timeNow = time.Now + +// StreamSigner defines an interface for the implementation of signing of event stream payloads +type StreamSigner interface { + GetSignature(headers, payload []byte, date time.Time) ([]byte, error) +} + +// SignEncoder envelopes event stream messages +// into an event stream message payload with included +// signature headers using the provided signer and encoder. +type SignEncoder struct { + signer StreamSigner + encoder Encoder + bufEncoder *BufferEncoder + + closeErr error + closed bool +} + +// NewSignEncoder returns a new SignEncoder using the provided stream signer and +// event stream encoder. +func NewSignEncoder(signer StreamSigner, encoder Encoder) *SignEncoder { + // TODO: Need to pass down logging + + return &SignEncoder{ + signer: signer, + encoder: encoder, + bufEncoder: NewBufferEncoder(), + } +} + +// Close encodes a final event stream signing envelope with an empty event stream +// payload. This final end-frame is used to mark the conclusion of the stream. +func (s *SignEncoder) Close() error { + if s.closed { + return s.closeErr + } + + if err := s.encode([]byte{}); err != nil { + if strings.Contains(err.Error(), "on closed pipe") { + return nil + } + + s.closeErr = err + s.closed = true + return s.closeErr + } + + return nil +} + +// Encode takes the provided message and add envelopes the message +// with the required signature. +func (s *SignEncoder) Encode(msg eventstream.Message) error { + payload, err := s.bufEncoder.Encode(msg) + if err != nil { + return err + } + + return s.encode(payload) +} + +func (s SignEncoder) encode(payload []byte) error { + date := timeNow() + + var msg eventstream.Message + msg.Headers.Set(DateHeader, eventstream.TimestampValue(date)) + msg.Payload = payload + + var headers bytes.Buffer + if err := eventstream.EncodeHeaders(&headers, msg.Headers); err != nil { + return err + } + + sig, err := s.signer.GetSignature(headers.Bytes(), msg.Payload, date) + if err != nil { + return err + } + + msg.Headers.Set(ChunkSignatureHeader, eventstream.BytesValue(sig)) + + return s.encoder.Encode(msg) +} + +// BufferEncoder is a utility that provides a buffered +// event stream encoder +type BufferEncoder struct { + encoder Encoder + buffer *bytes.Buffer +} + +// NewBufferEncoder returns a new BufferEncoder initialized +// with a 1024 byte buffer. +func NewBufferEncoder() *BufferEncoder { + buf := bytes.NewBuffer(make([]byte, 1024)) + return &BufferEncoder{ + encoder: eventstream.NewEncoder(buf), + buffer: buf, + } +} + +// Encode returns the encoded message as a byte slice. +// The returned byte slice will be modified on the next encode call +// and should not be held onto. 
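+//
+// Callers that must retain the bytes past the next Encode call should copy
+// them first (sketch): safe := append([]byte(nil), out...)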
+func (e *BufferEncoder) Encode(msg eventstream.Message) ([]byte, error) { + e.buffer.Reset() + + if err := e.encoder.Encode(msg); err != nil { + return nil, err + } + + return e.buffer.Bytes(), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go new file mode 100644 index 000000000..433bb1630 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go @@ -0,0 +1,129 @@ +package eventstreamapi + +import ( + "fmt" + "io" + "sync" + + "github.com/aws/aws-sdk-go/aws" +) + +// StreamWriter provides concurrent safe writing to an event stream. +type StreamWriter struct { + eventWriter *EventWriter + stream chan eventWriteAsyncReport + + done chan struct{} + closeOnce sync.Once + err *OnceError + + streamCloser io.Closer +} + +// NewStreamWriter returns a StreamWriter for the event writer, and stream +// closer provided. +func NewStreamWriter(eventWriter *EventWriter, streamCloser io.Closer) *StreamWriter { + w := &StreamWriter{ + eventWriter: eventWriter, + streamCloser: streamCloser, + stream: make(chan eventWriteAsyncReport), + done: make(chan struct{}), + err: NewOnceError(), + } + go w.writeStream() + + return w +} + +// Close terminates the writers ability to write new events to the stream. Any +// future call to Send will fail with an error. +func (w *StreamWriter) Close() error { + w.closeOnce.Do(w.safeClose) + return w.Err() +} + +func (w *StreamWriter) safeClose() { + close(w.done) +} + +// ErrorSet returns a channel which will be closed +// if an error occurs. +func (w *StreamWriter) ErrorSet() <-chan struct{} { + return w.err.ErrorSet() +} + +// Err returns any error that occurred while attempting to write an event to the +// stream. +func (w *StreamWriter) Err() error { + return w.err.Err() +} + +// Send writes a single event to the stream returning an error if the write +// failed. +// +// Send may be called concurrently. Events will be written to the stream +// safely. 
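+//
+// A typical call site (sketch; ctx and event are hypothetical values
+// satisfying aws.Context and Marshaler):
+//
+//	if err := w.Send(ctx, event); err != nil {
+//		// the stream is closed or the write failed
+//	}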
+func (w *StreamWriter) Send(ctx aws.Context, event Marshaler) error { + if err := w.Err(); err != nil { + return err + } + + resultCh := make(chan error) + wrapped := eventWriteAsyncReport{ + Event: event, + Result: resultCh, + } + + select { + case w.stream <- wrapped: + case <-ctx.Done(): + return ctx.Err() + case <-w.done: + return fmt.Errorf("stream closed, unable to send event") + } + + select { + case err := <-resultCh: + return err + case <-ctx.Done(): + return ctx.Err() + case <-w.done: + return fmt.Errorf("stream closed, unable to send event") + } +} + +func (w *StreamWriter) writeStream() { + defer w.Close() + + for { + select { + case wrapper := <-w.stream: + err := w.eventWriter.WriteEvent(wrapper.Event) + wrapper.ReportResult(w.done, err) + if err != nil { + w.err.SetError(err) + return + } + + case <-w.done: + if err := w.streamCloser.Close(); err != nil { + w.err.SetError(err) + } + return + } + } +} + +type eventWriteAsyncReport struct { + Event Marshaler + Result chan<- error +} + +func (e eventWriteAsyncReport) ReportResult(cancel <-chan struct{}, err error) bool { + select { + case e.Result <- err: + return true + case <-cancel: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport.go new file mode 100644 index 000000000..4bf2b27b2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +package eventstreamapi + +import "github.com/aws/aws-sdk-go/aws/request" + +// ApplyHTTPTransportFixes is a no-op for Go 1.18 and above. +func ApplyHTTPTransportFixes(r *request.Request) { +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport_go1.17.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport_go1.17.go new file mode 100644 index 000000000..2ee2c36fd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport_go1.17.go @@ -0,0 +1,19 @@ +//go:build !go1.18 +// +build !go1.18 + +package eventstreamapi + +import "github.com/aws/aws-sdk-go/aws/request" + +// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event +// stream functionality. Go 1.15 through 1.17 HTTP client could hang forever +// when an HTTP/2 connection failed with an non-200 status code and err. Using +// Expect 100-Continue, allows the HTTP client to gracefully handle the non-200 +// status code, and close the connection. +// +// This is a no-op for Go 1.18 and above. +func ApplyHTTPTransportFixes(r *request.Request) { + r.Handlers.Sign.PushBack(func(r *request.Request) { + r.HTTPRequest.Header.Set("Expect", "100-Continue") + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go new file mode 100644 index 000000000..7d7a79352 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go @@ -0,0 +1,63 @@ +package eventstreamapi + +import ( + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +// Marshaler provides a marshaling interface for event types to event stream +// messages. 
+type Marshaler interface { + MarshalEvent(protocol.PayloadMarshaler) (eventstream.Message, error) +} + +// Encoder is an stream encoder that will encode an event stream message for +// the transport. +type Encoder interface { + Encode(eventstream.Message) error +} + +// EventWriter provides a wrapper around the underlying event stream encoder +// for an io.WriteCloser. +type EventWriter struct { + encoder Encoder + payloadMarshaler protocol.PayloadMarshaler + eventTypeFor func(Marshaler) (string, error) +} + +// NewEventWriter returns a new event stream writer, that will write to the +// writer provided. Use the WriteEvent method to write an event to the stream. +func NewEventWriter(encoder Encoder, pm protocol.PayloadMarshaler, eventTypeFor func(Marshaler) (string, error), +) *EventWriter { + return &EventWriter{ + encoder: encoder, + payloadMarshaler: pm, + eventTypeFor: eventTypeFor, + } +} + +// WriteEvent writes an event to the stream. Returns an error if the event +// fails to marshal into a message, or writing to the underlying writer fails. +func (w *EventWriter) WriteEvent(event Marshaler) error { + msg, err := w.marshal(event) + if err != nil { + return err + } + + return w.encoder.Encode(msg) +} + +func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) { + eventType, err := w.eventTypeFor(event) + if err != nil { + return eventstream.Message{}, err + } + + msg, err := event.MarshalEvent(w.payloadMarshaler) + if err != nil { + return eventstream.Message{}, err + } + + msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) + return msg, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go new file mode 100644 index 000000000..f6f8c5674 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go @@ -0,0 +1,175 @@ +package eventstream + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Headers are a collection of EventStream header values. +type Headers []Header + +// Header is a single EventStream Key Value header pair. +type Header struct { + Name string + Value Value +} + +// Set associates the name with a value. If the header name already exists in +// the Headers the value will be replaced with the new one. +func (hs *Headers) Set(name string, value Value) { + var i int + for ; i < len(*hs); i++ { + if (*hs)[i].Name == name { + (*hs)[i].Value = value + return + } + } + + *hs = append(*hs, Header{ + Name: name, Value: value, + }) +} + +// Get returns the Value associated with the header. Nil is returned if the +// value does not exist. +func (hs Headers) Get(name string) Value { + for i := 0; i < len(hs); i++ { + if h := hs[i]; h.Name == name { + return h.Value + } + } + return nil +} + +// Del deletes the value in the Headers if it exists. 
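+//
+// Together with Set and Get this gives simple map-like access (sketch,
+// hypothetical name and value):
+//
+//	var hs Headers
+//	hs.Set("n", Int32Value(1))
+//	v := hs.Get("n") // Int32Value(1)
+//	hs.Del("n")      // Get("n") now returns nil
+//	_ = v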
+func (hs *Headers) Del(name string) { + for i := 0; i < len(*hs); i++ { + if (*hs)[i].Name == name { + copy((*hs)[i:], (*hs)[i+1:]) + (*hs) = (*hs)[:len(*hs)-1] + } + } +} + +// Clone returns a deep copy of the headers +func (hs Headers) Clone() Headers { + o := make(Headers, 0, len(hs)) + for _, h := range hs { + o.Set(h.Name, h.Value) + } + return o +} + +func decodeHeaders(r io.Reader) (Headers, error) { + hs := Headers{} + + for { + name, err := decodeHeaderName(r) + if err != nil { + if err == io.EOF { + // EOF while getting header name means no more headers + break + } + return nil, err + } + + value, err := decodeHeaderValue(r) + if err != nil { + return nil, err + } + + hs.Set(name, value) + } + + return hs, nil +} + +func decodeHeaderName(r io.Reader) (string, error) { + var n headerName + + var err error + n.Len, err = decodeUint8(r) + if err != nil { + return "", err + } + + name := n.Name[:n.Len] + if _, err := io.ReadFull(r, name); err != nil { + return "", err + } + + return string(name), nil +} + +func decodeHeaderValue(r io.Reader) (Value, error) { + var raw rawValue + + typ, err := decodeUint8(r) + if err != nil { + return nil, err + } + raw.Type = valueType(typ) + + var v Value + + switch raw.Type { + case trueValueType: + v = BoolValue(true) + case falseValueType: + v = BoolValue(false) + case int8ValueType: + var tv Int8Value + err = tv.decode(r) + v = tv + case int16ValueType: + var tv Int16Value + err = tv.decode(r) + v = tv + case int32ValueType: + var tv Int32Value + err = tv.decode(r) + v = tv + case int64ValueType: + var tv Int64Value + err = tv.decode(r) + v = tv + case bytesValueType: + var tv BytesValue + err = tv.decode(r) + v = tv + case stringValueType: + var tv StringValue + err = tv.decode(r) + v = tv + case timestampValueType: + var tv TimestampValue + err = tv.decode(r) + v = tv + case uuidValueType: + var tv UUIDValue + err = tv.decode(r) + v = tv + default: + panic(fmt.Sprintf("unknown value type %d", raw.Type)) + } + + // Error could be EOF, let caller deal with it + return v, err +} + +const maxHeaderNameLen = 255 + +type headerName struct { + Len uint8 + Name [maxHeaderNameLen]byte +} + +func (v headerName) encode(w io.Writer) error { + if err := binary.Write(w, binary.BigEndian, v.Len); err != nil { + return err + } + + _, err := w.Write(v.Name[:v.Len]) + return err +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go new file mode 100644 index 000000000..9f509d8f6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go @@ -0,0 +1,506 @@ +package eventstream + +import ( + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" +) + +const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1 + +// valueType is the EventStream header value type. 
+type valueType uint8 + +// Header value types +const ( + trueValueType valueType = iota + falseValueType + int8ValueType // Byte + int16ValueType // Short + int32ValueType // Integer + int64ValueType // Long + bytesValueType + stringValueType + timestampValueType + uuidValueType +) + +func (t valueType) String() string { + switch t { + case trueValueType: + return "bool" + case falseValueType: + return "bool" + case int8ValueType: + return "int8" + case int16ValueType: + return "int16" + case int32ValueType: + return "int32" + case int64ValueType: + return "int64" + case bytesValueType: + return "byte_array" + case stringValueType: + return "string" + case timestampValueType: + return "timestamp" + case uuidValueType: + return "uuid" + default: + return fmt.Sprintf("unknown value type %d", uint8(t)) + } +} + +type rawValue struct { + Type valueType + Len uint16 // Only set for variable length slices + Value []byte // byte representation of value, BigEndian encoding. +} + +func (r rawValue) encodeScalar(w io.Writer, v interface{}) error { + return binaryWriteFields(w, binary.BigEndian, + r.Type, + v, + ) +} + +func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error { + binary.Write(w, binary.BigEndian, r.Type) + + _, err := w.Write(v) + return err +} + +func (r rawValue) encodeBytes(w io.Writer, v []byte) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + _, err = w.Write(v) + return err +} + +func (r rawValue) encodeString(w io.Writer, v string) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + type stringWriter interface { + WriteString(string) (int, error) + } + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + if sw, ok := w.(stringWriter); ok { + _, err = sw.WriteString(v) + } else { + _, err = w.Write([]byte(v)) + } + + return err +} + +func decodeFixedBytesValue(r io.Reader, buf []byte) error { + _, err := io.ReadFull(r, buf) + return err +} + +func decodeBytesValue(r io.Reader) ([]byte, error) { + var raw rawValue + var err error + raw.Len, err = decodeUint16(r) + if err != nil { + return nil, err + } + + buf := make([]byte, raw.Len) + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, err + } + + return buf, nil +} + +func decodeStringValue(r io.Reader) (string, error) { + v, err := decodeBytesValue(r) + return string(v), err +} + +// Value represents the abstract header value. +type Value interface { + Get() interface{} + String() string + valueType() valueType + encode(io.Writer) error +} + +// An BoolValue provides eventstream encoding, and representation +// of a Go bool value. +type BoolValue bool + +// Get returns the underlying type +func (v BoolValue) Get() interface{} { + return bool(v) +} + +// valueType returns the EventStream header value type value. +func (v BoolValue) valueType() valueType { + if v { + return trueValueType + } + return falseValueType +} + +func (v BoolValue) String() string { + return strconv.FormatBool(bool(v)) +} + +// encode encodes the BoolValue into an eventstream binary value +// representation. 
+func (v BoolValue) encode(w io.Writer) error { + return binary.Write(w, binary.BigEndian, v.valueType()) +} + +// An Int8Value provides eventstream encoding, and representation of a Go +// int8 value. +type Int8Value int8 + +// Get returns the underlying value. +func (v Int8Value) Get() interface{} { + return int8(v) +} + +// valueType returns the EventStream header value type value. +func (Int8Value) valueType() valueType { + return int8ValueType +} + +func (v Int8Value) String() string { + return fmt.Sprintf("0x%02x", int8(v)) +} + +// encode encodes the Int8Value into an eventstream binary value +// representation. +func (v Int8Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeScalar(w, v) +} + +func (v *Int8Value) decode(r io.Reader) error { + n, err := decodeUint8(r) + if err != nil { + return err + } + + *v = Int8Value(n) + return nil +} + +// An Int16Value provides eventstream encoding, and representation of a Go +// int16 value. +type Int16Value int16 + +// Get returns the underlying value. +func (v Int16Value) Get() interface{} { + return int16(v) +} + +// valueType returns the EventStream header value type value. +func (Int16Value) valueType() valueType { + return int16ValueType +} + +func (v Int16Value) String() string { + return fmt.Sprintf("0x%04x", int16(v)) +} + +// encode encodes the Int16Value into an eventstream binary value +// representation. +func (v Int16Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int16Value) decode(r io.Reader) error { + n, err := decodeUint16(r) + if err != nil { + return err + } + + *v = Int16Value(n) + return nil +} + +// An Int32Value provides eventstream encoding, and representation of a Go +// int32 value. +type Int32Value int32 + +// Get returns the underlying value. +func (v Int32Value) Get() interface{} { + return int32(v) +} + +// valueType returns the EventStream header value type value. +func (Int32Value) valueType() valueType { + return int32ValueType +} + +func (v Int32Value) String() string { + return fmt.Sprintf("0x%08x", int32(v)) +} + +// encode encodes the Int32Value into an eventstream binary value +// representation. +func (v Int32Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int32Value) decode(r io.Reader) error { + n, err := decodeUint32(r) + if err != nil { + return err + } + + *v = Int32Value(n) + return nil +} + +// An Int64Value provides eventstream encoding, and representation of a Go +// int64 value. +type Int64Value int64 + +// Get returns the underlying value. +func (v Int64Value) Get() interface{} { + return int64(v) +} + +// valueType returns the EventStream header value type value. +func (Int64Value) valueType() valueType { + return int64ValueType +} + +func (v Int64Value) String() string { + return fmt.Sprintf("0x%016x", int64(v)) +} + +// encode encodes the Int64Value into an eventstream binary value +// representation. +func (v Int64Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int64Value) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = Int64Value(n) + return nil +} + +// An BytesValue provides eventstream encoding, and representation of a Go +// byte slice. +type BytesValue []byte + +// Get returns the underlying value. 
+func (v BytesValue) Get() interface{} { + return []byte(v) +} + +// valueType returns the EventStream header value type value. +func (BytesValue) valueType() valueType { + return bytesValueType +} + +func (v BytesValue) String() string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +// encode encodes the BytesValue into an eventstream binary value +// representation. +func (v BytesValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeBytes(w, []byte(v)) +} + +func (v *BytesValue) decode(r io.Reader) error { + buf, err := decodeBytesValue(r) + if err != nil { + return err + } + + *v = BytesValue(buf) + return nil +} + +// An StringValue provides eventstream encoding, and representation of a Go +// string. +type StringValue string + +// Get returns the underlying value. +func (v StringValue) Get() interface{} { + return string(v) +} + +// valueType returns the EventStream header value type value. +func (StringValue) valueType() valueType { + return stringValueType +} + +func (v StringValue) String() string { + return string(v) +} + +// encode encodes the StringValue into an eventstream binary value +// representation. +func (v StringValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeString(w, string(v)) +} + +func (v *StringValue) decode(r io.Reader) error { + s, err := decodeStringValue(r) + if err != nil { + return err + } + + *v = StringValue(s) + return nil +} + +// An TimestampValue provides eventstream encoding, and representation of a Go +// timestamp. +type TimestampValue time.Time + +// Get returns the underlying value. +func (v TimestampValue) Get() interface{} { + return time.Time(v) +} + +// valueType returns the EventStream header value type value. +func (TimestampValue) valueType() valueType { + return timestampValueType +} + +func (v TimestampValue) epochMilli() int64 { + nano := time.Time(v).UnixNano() + msec := nano / int64(time.Millisecond) + return msec +} + +func (v TimestampValue) String() string { + msec := v.epochMilli() + return strconv.FormatInt(msec, 10) +} + +// encode encodes the TimestampValue into an eventstream binary value +// representation. +func (v TimestampValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + msec := v.epochMilli() + return raw.encodeScalar(w, msec) +} + +func (v *TimestampValue) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = TimestampValue(timeFromEpochMilli(int64(n))) + return nil +} + +// MarshalJSON implements the json.Marshaler interface +func (v TimestampValue) MarshalJSON() ([]byte, error) { + return []byte(v.String()), nil +} + +func timeFromEpochMilli(t int64) time.Time { + secs := t / 1e3 + msec := t % 1e3 + return time.Unix(secs, msec*int64(time.Millisecond)).UTC() +} + +// An UUIDValue provides eventstream encoding, and representation of a UUID +// value. +type UUIDValue [16]byte + +// Get returns the underlying value. +func (v UUIDValue) Get() interface{} { + return v[:] +} + +// valueType returns the EventStream header value type value. +func (UUIDValue) valueType() valueType { + return uuidValueType +} + +func (v UUIDValue) String() string { + return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:]) +} + +// encode encodes the UUIDValue into an eventstream binary value +// representation. 
+func (v UUIDValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeFixedSlice(w, v[:]) +} + +func (v *UUIDValue) decode(r io.Reader) error { + tv := (*v)[:] + return decodeFixedBytesValue(r, tv) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go new file mode 100644 index 000000000..f7427da03 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go @@ -0,0 +1,117 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash/crc32" +) + +const preludeLen = 8 +const preludeCRCLen = 4 +const msgCRCLen = 4 +const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen +const maxPayloadLen = 1024 * 1024 * 16 // 16MB +const maxHeadersLen = 1024 * 128 // 128KB +const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen + +var crc32IEEETable = crc32.MakeTable(crc32.IEEE) + +// A Message provides the eventstream message representation. +type Message struct { + Headers Headers + Payload []byte +} + +func (m *Message) rawMessage() (rawMessage, error) { + var raw rawMessage + + if len(m.Headers) > 0 { + var headers bytes.Buffer + if err := EncodeHeaders(&headers, m.Headers); err != nil { + return rawMessage{}, err + } + raw.Headers = headers.Bytes() + raw.HeadersLen = uint32(len(raw.Headers)) + } + + raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen + + hash := crc32.New(crc32IEEETable) + binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen) + raw.PreludeCRC = hash.Sum32() + + binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC) + + if raw.HeadersLen > 0 { + hash.Write(raw.Headers) + } + + // Read payload bytes and update hash for it as well. + if len(m.Payload) > 0 { + raw.Payload = m.Payload + hash.Write(raw.Payload) + } + + raw.CRC = hash.Sum32() + + return raw, nil +} + +// Clone returns a deep copy of the message. +func (m Message) Clone() Message { + var payload []byte + if m.Payload != nil { + payload = make([]byte, len(m.Payload)) + copy(payload, m.Payload) + } + + return Message{ + Headers: m.Headers.Clone(), + Payload: payload, + } +} + +type messagePrelude struct { + Length uint32 + HeadersLen uint32 + PreludeCRC uint32 +} + +func (p messagePrelude) PayloadLen() uint32 { + return p.Length - p.HeadersLen - minMsgLen +} + +func (p messagePrelude) ValidateLens() error { + if p.Length == 0 || p.Length > maxMsgLen { + return LengthError{ + Part: "message prelude", + Want: maxMsgLen, + Have: int(p.Length), + } + } + if p.HeadersLen > maxHeadersLen { + return LengthError{ + Part: "message headers", + Want: maxHeadersLen, + Have: int(p.HeadersLen), + } + } + if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen { + return LengthError{ + Part: "message payload", + Want: maxPayloadLen, + Have: int(payloadLen), + } + } + + return nil +} + +type rawMessage struct { + messagePrelude + + Headers []byte + Payload []byte + + CRC uint32 +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go new file mode 100644 index 000000000..1f1d27aea --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -0,0 +1,104 @@ +package protocol + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "net" + "strconv" + "strings" +) + +// ValidateEndpointHostHandler is a request handler that will validate the +// request endpoint's hosts is a valid RFC 3986 host. 
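+//
+// For example (hypothetical hosts), "service.region.amazonaws.com" and
+// "api.example.com:443" pass validation, while "bad_host" fails because
+// "_" is outside the allowed host label characters [a-zA-Z0-9-].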
+var ValidateEndpointHostHandler = request.NamedHandler{ + Name: "awssdk.protocol.ValidateEndpointHostHandler", + Fn: func(r *request.Request) { + err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) + if err != nil { + r.Error = err + } + }, +} + +// ValidateEndpointHost validates that the host string passed in is a valid RFC +// 3986 host. Returns error if the host is not valid. +func ValidateEndpointHost(opName, host string) error { + paramErrs := request.ErrInvalidParams{Context: opName} + + var hostname string + var port string + var err error + + if strings.Contains(host, ":") { + hostname, port, err = net.SplitHostPort(host) + + if err != nil { + paramErrs.Add(request.NewErrParamFormat("endpoint", err.Error(), host)) + } + + if !ValidPortNumber(port) { + paramErrs.Add(request.NewErrParamFormat("endpoint port number", "[0-65535]", port)) + } + } else { + hostname = host + } + + labels := strings.Split(hostname, ".") + for i, label := range labels { + if i == len(labels)-1 && len(label) == 0 { + // Allow trailing dot for FQDN hosts. + continue + } + + if !ValidHostLabel(label) { + paramErrs.Add(request.NewErrParamFormat( + "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) + } + } + + if len(hostname) == 0 { + paramErrs.Add(request.NewErrParamMinLen("endpoint host", 1)) + } + + if len(hostname) > 255 { + paramErrs.Add(request.NewErrParamMaxLen( + "endpoint host", 255, host, + )) + } + + if paramErrs.Len() > 0 { + return paramErrs + } + return nil +} + +// ValidHostLabel returns if the label is a valid RFC 3986 host label. +func ValidHostLabel(label string) bool { + if l := len(label); l == 0 || l > 63 { + return false + } + for _, r := range label { + switch { + case r >= '0' && r <= '9': + case r >= 'A' && r <= 'Z': + case r >= 'a' && r <= 'z': + case r == '-': + default: + return false + } + } + + return true +} + +// ValidPortNumber return if the port is valid RFC 3986 port +func ValidPortNumber(port string) bool { + i, err := strconv.Atoi(port) + if err != nil { + return false + } + + if i < 0 || i > 65535 { + return false + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go new file mode 100644 index 000000000..915b0fcaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go @@ -0,0 +1,54 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// HostPrefixHandlerName is the handler name for the host prefix request +// handler. +const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" + +// NewHostPrefixHandler constructs a build handler +func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { + builder := HostPrefixBuilder{ + Prefix: prefix, + LabelsFn: labelsFn, + } + + return request.NamedHandler{ + Name: HostPrefixHandlerName, + Fn: builder.Build, + } +} + +// HostPrefixBuilder provides the request handler to expand and prepend +// the host prefix into the operation's request endpoint host. +type HostPrefixBuilder struct { + Prefix string + LabelsFn func() map[string]string +} + +// Build updates the passed in Request with the HostPrefix template expanded. 
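+//
+// For example (hypothetical values), a Prefix of "{Bucket}." with LabelsFn
+// returning {"Bucket": "mybucket"} rewrites a request host of
+// "s3.us-west-2.amazonaws.com" to "mybucket.s3.us-west-2.amazonaws.com".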
+func (h HostPrefixBuilder) Build(r *request.Request) {
+	if aws.BoolValue(r.Config.DisableEndpointHostPrefix) {
+		return
+	}
+
+	var labels map[string]string
+	if h.LabelsFn != nil {
+		labels = h.LabelsFn()
+	}
+
+	prefix := h.Prefix
+	for name, value := range labels {
+		prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
+	}
+
+	r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host
+	if len(r.HTTPRequest.Host) > 0 {
+		r.HTTPRequest.Host = prefix + r.HTTPRequest.Host
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
new file mode 100644
index 000000000..53831dff9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+	"crypto/rand"
+	"fmt"
+	"reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. This is exported for testing, and should not be used.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an Idempotency token.
+//
+// Only *string and string type fields that are tagged with idempotencyToken,
+// and which are not already set, can be auto filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+	switch u := v.Interface().(type) {
+	// To auto fill an Idempotency token the field must be a string,
+	// tagged for auto fill, and have a zero value.
+	case *string:
+		return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	case string:
+		return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	}
+
+	return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+	b := make([]byte, 16)
+	RandReader.Read(b)
+
+	return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the provided value with an Idempotency Token,
+// given that the value can be set. Will panic if the value is not settable.
+func SetIdempotencyToken(v reflect.Value) {
+	if v.Kind() == reflect.Ptr {
+		if v.IsNil() && v.CanSet() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		v = v.Elem()
+	}
+	v = reflect.Indirect(v)
+
+	if !v.CanSet() {
+		panic(fmt.Sprintf("unable to set idempotency token %v", v))
+	}
+
+	b := make([]byte, 16)
+	_, err := rand.Read(b)
+	if err != nil {
+		// TODO handle error
+		return
+	}
+
+	v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
+func UUIDVersion4(u []byte) string {
+	// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+	// 13th character is "4"
+	u[6] = (u[6] | 0x40) & 0x4F
+	// 17th character is "8", "9", "a", or "b"
+	u[8] = (u[8] | 0x80) & 0xBF
+
+	return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
new file mode 100644
index 000000000..12e814ddf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
@@ -0,0 +1,309 @@
+// Package jsonutil provides JSON serialization of AWS requests and responses.
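+//
+// Illustration only (not part of the vendored source): a shape such as
+//
+//	type Input struct {
+//		_    struct{} `type:"structure"`
+//		Name *string  `locationName:"name" type:"string"`
+//	}
+//
+// serializes via BuildJSON to {"name":"..."}, with nil pointer members omitted.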
+package jsonutil + +import ( + "bytes" + "encoding/base64" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + +var timeType = reflect.ValueOf(time.Time{}).Type() +var byteSliceType = reflect.ValueOf([]byte{}).Type() + +// BuildJSON builds a JSON string for a given object v. +func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + origVal := value + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(origVal, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + if !value.IsValid() && tag.Get("type") != "structure" { + return nil + } + } + + buf.WriteByte('{') + defer buf.WriteString("}") + + if !value.IsValid() { + return nil + } + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + + // This allocates the most memory. + // Additionally, we cannot skip nil fields due to + // idempotency auto filling. 
+ field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + writeString(name, buf) + buf.WriteString(`:`) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + sv := sortedValues(value.MapKeys()) + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + writeString(k.String(), buf) + buf.WriteString(`:`) + + buildAny(value.MapIndex(k), buf, "") + } + + buf.WriteString("}") + + return nil +} + +func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + // prevents allocation on the heap. + scratch := [64]byte{} + switch value := reflect.Indirect(v); value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + if value.Bool() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + case reflect.Int64: + buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) + case reflect.Float64: + f := value.Float() + switch { + case math.IsNaN(f): + writeString(floatNaN, buf) + case math.IsInf(f, 1): + writeString(floatInf, buf) + case math.IsInf(f, -1): + writeString(floatNegInf, buf) + default: + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) + } + default: + switch converted := value.Interface().(type) { + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.UnixTimeFormatName + } + + ts := protocol.FormatTime(format, converted) + if format != protocol.UnixTimeFormatName { + ts = `"` + ts + `"` + } + + buf.WriteString(ts) + case []byte: + if !value.IsNil() { + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. 
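+					// base64.NewEncoder streams encoded chunks directly into
+					// buf, so no second full-size intermediate slice is
+					// allocated.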
+					enc := base64.NewEncoder(base64.StdEncoding, buf)
+					enc.Write(converted)
+					enc.Close()
+				}
+				buf.WriteByte('"')
+			}
+		case aws.JSONValue:
+			str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape)
+			if err != nil {
+				return fmt.Errorf("unable to encode JSONValue, %v", err)
+			}
+			buf.WriteString(str)
+		default:
+			return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type())
+		}
+	}
+	return nil
+}
+
+var hex = "0123456789abcdef"
+
+func writeString(s string, buf *bytes.Buffer) {
+	buf.WriteByte('"')
+	for i := 0; i < len(s); i++ {
+		if s[i] == '"' {
+			buf.WriteString(`\"`)
+		} else if s[i] == '\\' {
+			buf.WriteString(`\\`)
+		} else if s[i] == '\b' {
+			buf.WriteString(`\b`)
+		} else if s[i] == '\f' {
+			buf.WriteString(`\f`)
+		} else if s[i] == '\r' {
+			buf.WriteString(`\r`)
+		} else if s[i] == '\t' {
+			buf.WriteString(`\t`)
+		} else if s[i] == '\n' {
+			buf.WriteString(`\n`)
+		} else if s[i] < 32 {
+			buf.WriteString("\\u00")
+			buf.WriteByte(hex[s[i]>>4])
+			buf.WriteByte(hex[s[i]&0xF])
+		} else {
+			buf.WriteByte(s[i])
+		}
+	}
+	buf.WriteByte('"')
+}
+
+// Returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+	for value.Kind() == reflect.Ptr {
+		value = value.Elem()
+	}
+	return value
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
new file mode 100644
index 000000000..f9334879b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
@@ -0,0 +1,317 @@
+package jsonutil
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"math"
+	"math/big"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+var millisecondsFloat = new(big.Float).SetInt64(1e3)
+
+// UnmarshalJSONError unmarshals the reader's JSON document into the passed in
+// type. The value to unmarshal the json document into must be a pointer to
+// the type.
+func UnmarshalJSONError(v interface{}, stream io.Reader) error {
+	var errBuf bytes.Buffer
+	body := io.TeeReader(stream, &errBuf)
+
+	err := json.NewDecoder(body).Decode(v)
+	if err != nil {
+		msg := "failed decoding error message"
+		if err == io.EOF {
+			msg = "error message missing"
+			err = nil
+		}
+		return awserr.NewUnmarshalError(err, msg, errBuf.Bytes())
+	}
+
+	return nil
+}
+
+// UnmarshalJSON reads a stream and unmarshals the results into object v.
+func UnmarshalJSON(v interface{}, stream io.Reader) error {
+	var out interface{}
+
+	decoder := json.NewDecoder(stream)
+	decoder.UseNumber()
+	err := decoder.Decode(&out)
+	if err == io.EOF {
+		return nil
+	} else if err != nil {
+		return err
+	}
+
+	return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "")
+}
+
+// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into
+// the object v. Ignores casing for structure members.
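+// For example, a member with locationName "Status" will also match a JSON key
+// of "status" or "STATUS" via the case-insensitive fallback below.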
+func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { + var out interface{} + + decoder := json.NewDecoder(stream) + decoder.UseNumber() + err := decoder.Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{ + caseInsensitive: true, + }.unmarshalAny(reflect.ValueOf(v), out, "") +} + +type unmarshaler struct { + caseInsensitive bool +} + +func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return u.unmarshalStruct(value, data, tag) + case "list": + return u.unmarshalList(value, data, tag) + case "map": + return u.unmarshalMap(value, data, tag) + default: + return u.unmarshalScalar(value, data, tag) + } +} + +func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if u.caseInsensitive { + if _, ok := mapData[name]; !ok { + // Fallback to uncased name search if the exact name didn't match. 
+ for kn, v := range mapData { + if strings.EqualFold(kn, name) { + mapData[name] = v + } + } + } + } + + member := value.FieldByIndex(field.Index) + err := u.unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := u.unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + u.unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + + switch d := data.(type) { + case nil: + return nil // nothing to do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + // No need to use escaping as the value is a non-quoted string. + v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) + if err != nil { + return err + } + value.Set(reflect.ValueOf(v)) + case *float64: + // These are regular strings when parsed by encoding/json's unmarshaler. 
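+			// e.g. a document of {"rate": "NaN"} reaches this case as the
+			// string "NaN", not as a json.Number.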
+ switch { + case strings.EqualFold(d, floatNaN): + value.Set(reflect.ValueOf(aws.Float64(math.NaN()))) + case strings.EqualFold(d, floatInf): + value.Set(reflect.ValueOf(aws.Float64(math.Inf(1)))) + case strings.EqualFold(d, floatNegInf): + value.Set(reflect.ValueOf(aws.Float64(math.Inf(-1)))) + default: + return fmt.Errorf("unknown JSON number value: %s", d) + } + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case json.Number: + switch value.Interface().(type) { + case *int64: + // Retain the old behavior where we would just truncate the float64 + // calling d.Int64() here could cause an invalid syntax error due to the usage of strconv.ParseInt + f, err := d.Float64() + if err != nil { + return err + } + di := int64(f) + value.Set(reflect.ValueOf(&di)) + case *float64: + f, err := d.Float64() + if err != nil { + return err + } + value.Set(reflect.ValueOf(&f)) + case *time.Time: + float, ok := new(big.Float).SetString(d.String()) + if !ok { + return fmt.Errorf("unsupported float time representation: %v", d.String()) + } + float = float.Mul(float, millisecondsFloat) + ms, _ := float.Int64() + t := time.Unix(0, ms*1e6).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go new file mode 100644 index 000000000..d9aa27114 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -0,0 +1,87 @@ +// Package jsonrpc provides JSON RPC utilities for serialization of AWS +// requests and responses. +package jsonrpc + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/json.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/json.json unmarshal_test.go + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +var emptyJSON = []byte("{}") + +// BuildHandler is a named request handler for building jsonrpc protocol +// requests +var BuildHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.Build", + Fn: Build, +} + +// UnmarshalHandler is a named request handler for unmarshaling jsonrpc +// protocol requests +var UnmarshalHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.Unmarshal", + Fn: Unmarshal, +} + +// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc +// protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.UnmarshalMeta", + Fn: UnmarshalMeta, +} + +// Build builds a JSON payload for a JSON RPC request. 
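+// For a hypothetical operation "FooService.DoThing" with JSONVersion "1.1",
+// Build sets the body to the serialized parameters (or "{}"), adds an
+// X-Amz-Target header of "FooService.DoThing", and sets the Content-Type to
+// "application/x-amz-json-1.1".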
+func Build(req *request.Request) {
+	var buf []byte
+	var err error
+	if req.ParamsFilled() {
+		buf, err = jsonutil.BuildJSON(req.Params)
+		if err != nil {
+			req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err)
+			return
+		}
+	} else {
+		buf = emptyJSON
+	}
+
+	// Always serialize the body, don't suppress it.
+	req.SetBufferBody(buf)
+
+	if req.ClientInfo.TargetPrefix != "" {
+		target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name
+		req.HTTPRequest.Header.Add("X-Amz-Target", target)
+	}
+
+	// Only set the content type if one is not already specified and a
+	// JSONVersion is specified.
+	if ct, v := req.HTTPRequest.Header.Get("Content-Type"), req.ClientInfo.JSONVersion; len(ct) == 0 && len(v) != 0 {
+		jsonVersion := req.ClientInfo.JSONVersion
+		req.HTTPRequest.Header.Set("Content-Type", "application/x-amz-json-"+jsonVersion)
+	}
+}
+
+// Unmarshal unmarshals a response for a JSON RPC service.
+func Unmarshal(req *request.Request) {
+	defer req.HTTPResponse.Body.Close()
+	if req.DataFilled() {
+		err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)
+		if err != nil {
+			req.Error = awserr.NewRequestFailure(
+				awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err),
+				req.HTTPResponse.StatusCode,
+				req.RequestID,
+			)
+		}
+	}
+	return
+}
+
+// UnmarshalMeta unmarshals headers from a response for a JSON RPC service.
+func UnmarshalMeta(req *request.Request) {
+	rest.UnmarshalMeta(req)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
new file mode 100644
index 000000000..9c1ccde54
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
@@ -0,0 +1,160 @@
+package jsonrpc
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+)
+
+const (
+	awsQueryError = "x-amzn-query-error"
+	// A valid header example - "x-amzn-query-error": "<ErrorCode>;<ErrorType>"
+	awsQueryErrorPartsCount = 2
+)
+
+// UnmarshalTypedError provides unmarshaling of API response errors for both
+// typed and untyped errors.
+type UnmarshalTypedError struct {
+	exceptions      map[string]func(protocol.ResponseMetadata) error
+	queryExceptions map[string]func(protocol.ResponseMetadata, string) error
+}
+
+// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the
+// set of exception names to the error unmarshalers
+func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
+	return &UnmarshalTypedError{
+		exceptions:      exceptions,
+		queryExceptions: map[string]func(protocol.ResponseMetadata, string) error{},
+	}
+}
+
+// NewUnmarshalTypedErrorWithOptions works similarly to NewUnmarshalTypedError,
+// applying options to the UnmarshalTypedError before returning it
+func NewUnmarshalTypedErrorWithOptions(exceptions map[string]func(protocol.ResponseMetadata) error, optFns ...func(*UnmarshalTypedError)) *UnmarshalTypedError {
+	unmarshaledError := NewUnmarshalTypedError(exceptions)
+	for _, fn := range optFns {
+		fn(unmarshaledError)
+	}
+	return unmarshaledError
+}
+
+// WithQueryCompatibility is a helper function to construct a functional option for use with NewUnmarshalTypedErrorWithOptions.
+// The queryExceptions given act as an override for unmarshaling errors when
+// query compatible error codes are found.
+// See also [awsQueryCompatible trait]
+//
+// [awsQueryCompatible trait]: https://smithy.io/2.0/aws/protocols/aws-query-protocol.html#aws-protocols-awsquerycompatible-trait
+func WithQueryCompatibility(queryExceptions map[string]func(protocol.ResponseMetadata, string) error) func(*UnmarshalTypedError) {
+	return func(typedError *UnmarshalTypedError) {
+		typedError.queryExceptions = queryExceptions
+	}
+}
+
+// UnmarshalError attempts to unmarshal the HTTP response error as a known
+// error type. If unable to unmarshal the error type, the generic SDK error
+// type will be used.
+func (u *UnmarshalTypedError) UnmarshalError(
+	resp *http.Response,
+	respMeta protocol.ResponseMetadata,
+) (error, error) {
+
+	var buf bytes.Buffer
+	var jsonErr jsonErrorResponse
+	teeReader := io.TeeReader(resp.Body, &buf)
+	err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader)
+	if err != nil {
+		return nil, err
+	}
+	body := ioutil.NopCloser(&buf)
+
+	// Code may be separated by hash(#), with the last element being the code
+	// used by the SDK.
+	codeParts := strings.SplitN(jsonErr.Code, "#", 2)
+	code := codeParts[len(codeParts)-1]
+	msg := jsonErr.Message
+
+	queryCodeParts := queryCodeParts(resp, u)
+
+	if fn, ok := u.exceptions[code]; ok {
+		// If query-compatible exceptions are found and query-error-header is found,
+		// then use associated constructor to get exception with query error code.
+		//
+		// If exception code is known, use associated constructor to get a value
+		// for the exception that the JSON body can be unmarshaled into.
+		var v error
+		queryErrFn, queryExceptionsFound := u.queryExceptions[code]
+		if len(queryCodeParts) == awsQueryErrorPartsCount && queryExceptionsFound {
+			v = queryErrFn(respMeta, queryCodeParts[0])
+		} else {
+			v = fn(respMeta)
+		}
+		err := jsonutil.UnmarshalJSONCaseInsensitive(v, body)
+		if err != nil {
+			return nil, err
+		}
+		return v, nil
+	}
+
+	if len(queryCodeParts) == awsQueryErrorPartsCount && len(u.queryExceptions) > 0 {
+		code = queryCodeParts[0]
+	}
+
+	// fallback to unmodeled generic exceptions
+	return awserr.NewRequestFailure(
+		awserr.New(code, msg, nil),
+		respMeta.StatusCode,
+		respMeta.RequestID,
+	), nil
+}
+
+// A valid header example - "x-amzn-query-error": "<ErrorCode>;<ErrorType>"
+func queryCodeParts(resp *http.Response, u *UnmarshalTypedError) []string {
+	queryCodeHeader := resp.Header.Get(awsQueryError)
+	var queryCodeParts []string
+	if queryCodeHeader != "" && len(u.queryExceptions) > 0 {
+		queryCodeParts = strings.Split(queryCodeHeader, ";")
+	}
+	return queryCodeParts
+}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc
+// protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{
+	Name: "awssdk.jsonrpc.UnmarshalError",
+	Fn:   UnmarshalError,
+}
+
+// UnmarshalError unmarshals an error response for a JSON RPC service.
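+// For illustration, a hypothetical body of
+// {"__type":"com.example#ResourceNotFoundException","message":"no such id"}
+// yields the error code "ResourceNotFoundException" (the text after the last
+// "#") with message "no such id".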
+func UnmarshalError(req *request.Request) {
+	defer req.HTTPResponse.Body.Close()
+
+	var jsonErr jsonErrorResponse
+	err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body)
+	if err != nil {
+		req.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to unmarshal error message", err),
+			req.HTTPResponse.StatusCode,
+			req.RequestID,
+		)
+		return
+	}
+
+	codes := strings.SplitN(jsonErr.Code, "#", 2)
+	req.Error = awserr.NewRequestFailure(
+		awserr.New(codes[len(codes)-1], jsonErr.Message, nil),
+		req.HTTPResponse.StatusCode,
+		req.RequestID,
+	)
+}
+
+type jsonErrorResponse struct {
+	Code    string `json:"__type"`
+	Message string `json:"message"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
new file mode 100644
index 000000000..776d11018
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
@@ -0,0 +1,76 @@
+package protocol
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/aws/aws-sdk-go/aws"
+)
+
+// EscapeMode is the mode that should be used for escaping a value
+type EscapeMode uint
+
+// The modes for escaping a value before it is marshaled, and unmarshaled.
+const (
+	NoEscape EscapeMode = iota
+	Base64Escape
+	QuotedEscape
+)
+
+// EncodeJSONValue marshals the value into a JSON string, and optionally base64
+// encodes the string before returning it.
+//
+// Will panic if the escape mode is unknown.
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
+	b, err := json.Marshal(v)
+	if err != nil {
+		return "", err
+	}
+
+	switch escape {
+	case NoEscape:
+		return string(b), nil
+	case Base64Escape:
+		return base64.StdEncoding.EncodeToString(b), nil
+	case QuotedEscape:
+		return strconv.Quote(string(b)), nil
+	}
+
+	panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
+}
+
+// DecodeJSONValue will attempt to decode the string input as a JSONValue,
+// optionally base64 decoding the value first before JSON unmarshaling.
+//
+// Will panic if the escape mode is unknown.
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
+	var b []byte
+	var err error
+
+	switch escape {
+	case NoEscape:
+		b = []byte(v)
+	case Base64Escape:
+		b, err = base64.StdEncoding.DecodeString(v)
+	case QuotedEscape:
+		var u string
+		u, err = strconv.Unquote(v)
+		b = []byte(u)
+	default:
+		panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	m := aws.JSONValue{}
+	err = json.Unmarshal(b, &m)
+	if err != nil {
+		return nil, err
+	}
+
+	return m, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
new file mode 100644
index 000000000..0ea0647a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
@@ -0,0 +1,81 @@
+package protocol
+
+import (
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// PayloadUnmarshaler provides the interface for unmarshaling a payload's
+// reader into an SDK shape.
+type PayloadUnmarshaler interface {
+	UnmarshalPayload(io.Reader, interface{}) error
+}
+
+// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
+// HandlerList.
This provides support for unmarshaling a payload reader to
+// a shape without needing an SDK request first.
+type HandlerPayloadUnmarshal struct {
+	Unmarshalers request.HandlerList
+}
+
+// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
+// the Unmarshalers HandlerList provided. Returns an error if unmarshaling
+// fails.
+func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
+	req := &request.Request{
+		HTTPRequest: &http.Request{},
+		HTTPResponse: &http.Response{
+			StatusCode: 200,
+			Header:     http.Header{},
+			Body:       ioutil.NopCloser(r),
+		},
+		Data: v,
+	}
+
+	h.Unmarshalers.Run(req)
+
+	return req.Error
+}
+
+// PayloadMarshaler provides the interface for marshaling an SDK shape into an
+// io.Writer.
+type PayloadMarshaler interface {
+	MarshalPayload(io.Writer, interface{}) error
+}
+
+// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
+// This provides support for marshaling an SDK shape into an io.Writer without
+// needing an SDK request first.
+type HandlerPayloadMarshal struct {
+	Marshalers request.HandlerList
+}
+
+// MarshalPayload marshals the SDK shape into the io.Writer using the
+// Marshalers HandlerList provided. Returns an error if marshaling fails.
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error {
+	req := request.New(
+		aws.Config{},
+		metadata.ClientInfo{},
+		request.Handlers{},
+		nil,
+		&request.Operation{HTTPMethod: "PUT"},
+		v,
+		nil,
+	)
+
+	h.Marshalers.Run(req)
+
+	if req.Error != nil {
+		return req.Error
+	}
+
+	io.Copy(w, req.GetBody())
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go
new file mode 100644
index 000000000..9d521dcb9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go
@@ -0,0 +1,49 @@
+package protocol
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RequireHTTPMinProtocol request handler is used to enforce that
+// the target endpoint supports the given major and minor HTTP protocol version.
+type RequireHTTPMinProtocol struct {
+	Major, Minor int
+}
+
+// Handler will mark the request.Request with an error if the
+// target endpoint did not connect with the required HTTP protocol
+// major and minor version.
+func (p RequireHTTPMinProtocol) Handler(r *request.Request) {
+	if r.Error != nil || r.HTTPResponse == nil {
+		return
+	}
+
+	if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") {
+		r.Error = newMinHTTPProtoError(p.Major, p.Minor, r)
+	}
+
+	if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor {
+		r.Error = newMinHTTPProtoError(p.Major, p.Minor, r)
+	}
+}
+
+// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint
+// did not match the required HTTP major and minor protocol version.
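+//
+// A minimal wiring sketch (hypothetical, not vendored source):
+//
+//	req.Handlers.Unmarshal.PushFront(protocol.RequireHTTPMinProtocol{Major: 2}.Handler)
+//
+// would fail any request whose response arrives over a protocol below HTTP/2,
+// surfacing this error code.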
+const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError" + +func newMinHTTPProtoError(major, minor int, r *request.Request) error { + return awserr.NewRequestFailure( + awserr.New("MinimumHTTPProtocolError", + fmt.Sprintf( + "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", + major, minor, r.HTTPResponse.Proto, + ), + nil, + ), + r.HTTPResponse.StatusCode, r.RequestID, + ) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 000000000..d40346a77 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialization of AWS query requests, and responses. +package query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. +func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) + return + } + + if !r.IsPresigned() { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 000000000..2ca0b19db --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,276 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "math" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. 
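+// For illustration (not vendored source): a non-EC2 shape with an untagged
+// field Values []*string serializes, once URL encoded, as
+// "Values.member.1=a&Values.member.2=b".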
+func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." + name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, and not ec2, generate an empty value + if !value.IsNil() && value.Len() == 0 && !q.isEC2 { + v.Set(prefix, "") + return nil + } + + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + if listName := tag.Get("locationNameList"); listName == "" { + prefix += ".member" + } else { + prefix += "." + listName + } + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. 
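+	// e.g. map keys {"b", "a"} always serialize with "a" as entry 1 and "b"
+	// as entry 2, which keeps the serialized form stable across runs.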
+ mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + var str string + switch { + case math.IsNaN(value): + str = floatNaN + case math.IsInf(value, 1): + str = floatInf + case math.IsInf(value, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(value, 'f', -1, 64) + } + v.Set(name, str) + case float32: + asFloat64 := float64(value) + var str string + switch { + case math.IsNaN(asFloat64): + str = floatNaN + case math.IsInf(asFloat64, 1): + str = floatInf + case math.IsInf(asFloat64, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(asFloat64, 'f', -1, 32) + } + v.Set(name, str) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + v.Set(name, protocol.FormatTime(format, value)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 000000000..9231e95d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,39 @@ +package query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response 
for an AWS Query service.
+func Unmarshal(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	if r.DataFilled() {
+		decoder := xml.NewDecoder(r.HTTPResponse.Body)
+		err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
+		if err != nil {
+			r.Error = awserr.NewRequestFailure(
+				awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err),
+				r.HTTPResponse.StatusCode,
+				r.RequestID,
+			)
+			return
+		}
+	}
+}
+
+// UnmarshalMeta unmarshals header response values for an AWS Query service.
+func UnmarshalMeta(r *request.Request) {
+	r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100644
index 000000000..2c0cbba90
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,70 @@
+package query
+
+import (
+	"encoding/xml"
+	"fmt"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalErrorHandler is a named request handler to unmarshal request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+type xmlErrorResponse struct {
+	Code      string `xml:"Error>Code"`
+	Message   string `xml:"Error>Message"`
+	RequestID string `xml:"RequestId"`
+}
+
+type xmlResponseError struct {
+	xmlErrorResponse
+}
+
+func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	const svcUnavailableTagName = "ServiceUnavailableException"
+	const errorResponseTagName = "ErrorResponse"
+
+	switch start.Name.Local {
+	case svcUnavailableTagName:
+		e.Code = svcUnavailableTagName
+		e.Message = "service is unavailable"
+		return d.Skip()
+
+	case errorResponseTagName:
+		return d.DecodeElement(&e.xmlErrorResponse, &start)
+
+	default:
+		return fmt.Errorf("unknown error response tag, %v", start)
+	}
+}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	var respErr xmlResponseError
+	err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to unmarshal error message", err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	reqID := respErr.RequestID
+	if len(reqID) == 0 {
+		reqID = r.RequestID
+	}
+
+	r.Error = awserr.NewRequestFailure(
+		awserr.New(strings.TrimSpace(respErr.Code), strings.TrimSpace(respErr.Message), nil),
+		r.HTTPResponse.StatusCode,
+		reqID,
+	)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
new file mode 100644
index 000000000..ecc521f88
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -0,0 +1,353 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
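+//
+// Illustration only (not part of the vendored source): struct members tagged
+// `location:"uri"` splice into the URL path template, `location:"querystring"`
+// into the query string, and `location:"header"` or `location:"headers"` into
+// the HTTP headers. For example, a Bucket *string member tagged
+// `location:"uri" locationName:"Bucket"` replaces the "{Bucket}" segment of
+// the path.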
+package rest
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"math"
+	"net/http"
+	"net/url"
+	"path"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+const (
+	floatNaN    = "NaN"
+	floatInf    = "Infinity"
+	floatNegInf = "-Infinity"
+)
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+var errValueNotSet = fmt.Errorf("value not set")
+
+var byteSliceType = reflect.TypeOf([]byte{})
+
+func init() {
+	for i := 0; i < len(noEscape); i++ {
+		// AWS expects every character except these to be escaped
+		noEscape[i] = (i >= 'A' && i <= 'Z') ||
+			(i >= 'a' && i <= 'z') ||
+			(i >= '0' && i <= '9') ||
+			i == '-' ||
+			i == '.' ||
+			i == '_' ||
+			i == '~'
+	}
+}
+
+// BuildHandler is a named request handler for building rest protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
+
+// Build builds the REST component of a service request.
+func Build(r *request.Request) {
+	if r.ParamsFilled() {
+		v := reflect.ValueOf(r.Params).Elem()
+		buildLocationElements(r, v, false)
+		buildBody(r, v)
+	}
+}
+
+// BuildAsGET builds the REST component of a service request with the ability to hoist
+// data from the body.
+func BuildAsGET(r *request.Request) {
+	if r.ParamsFilled() {
+		v := reflect.ValueOf(r.Params).Elem()
+		buildLocationElements(r, v, true)
+		buildBody(r, v)
+	}
+}
+
+func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) {
+	query := r.HTTPRequest.URL.Query()
+
+	// Setup the raw path to match the base path pattern. This is needed
+	// so that when the path is mutated a custom escaped version can be
+	// stored in RawPath that will be used by the Go client.
+	r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path
+
+	for i := 0; i < v.NumField(); i++ {
+		m := v.Field(i)
+		if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
+			continue
+		}
+
+		if m.IsValid() {
+			field := v.Type().Field(i)
+			name := field.Tag.Get("locationName")
+			if name == "" {
+				name = field.Name
+			}
+			if kind := m.Kind(); kind == reflect.Ptr {
+				m = m.Elem()
+			} else if kind == reflect.Interface {
+				if !m.Elem().IsValid() {
+					continue
+				}
+			}
+			if !m.IsValid() {
+				continue
+			}
+			if field.Tag.Get("ignore") != "" {
+				continue
+			}
+
+			// Support the ability to customize values to be marshaled as a
+			// blob even though they were modeled as a string. Required for S3
+			// API operations like SSECustomerKey, which is modeled as a string
+			// but required to be base64 encoded in the request.
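+			// e.g. (illustrative) a field tagged `marshal-as:"blob"` is
+			// converted to []byte here so convertType base64 encodes it like
+			// any other blob.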
+ if field.Tag.Get("marshal-as") == "blob" { + m = m.Convert(byteSliceType) + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) + case "querystring": + err = buildQueryString(query, m, name, field.Tag) + default: + if buildGETQuery { + err = buildQueryString(query, m, name, field.Tag) + } + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { + cleanPath(r.HTTPRequest.URL) + } +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + name = strings.TrimSpace(name) + str = strings.TrimSpace(str) + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { + prefix := tag.Get("locationName") + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key), tag) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + + } + keyStr := strings.TrimSpace(key.String()) + str = strings.TrimSpace(str) + + header.Add(prefix+keyStr, str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) + u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) + + u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) + u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else 
if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func cleanPath(u *url.URL) { + hasSlash := strings.HasSuffix(u.Path, "/") + + // clean up path, removing duplicate `/` + u.Path = path.Clean(u.Path) + u.RawPath = path.Clean(u.RawPath) + + if hasSlash && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + u.RawPath += "/" + } +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + switch value := v.Interface().(type) { + case string: + if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" { + value = base64.StdEncoding.EncodeToString([]byte(value)) + } + str = value + case []*string: + if tag.Get("location") != "header" || tag.Get("enum") == "" { + return "", fmt.Errorf("%T is only supported with location header and enum shapes", value) + } + if len(value) == 0 { + return "", errValueNotSet + } + + buff := &bytes.Buffer{} + for i, sv := range value { + if sv == nil || len(*sv) == 0 { + continue + } + if i != 0 { + buff.WriteRune(',') + } + item := *sv + if strings.Index(item, `,`) != -1 || strings.Index(item, `"`) != -1 { + item = strconv.Quote(item) + } + buff.WriteString(item) + } + str = string(buff.Bytes()) + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + switch { + case math.IsNaN(value): + str = floatNaN + case math.IsInf(value, 1): + str = floatInf + case math.IsInf(value, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(value, 'f', -1, 64) + } + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = protocol.FormatTime(format, value) + case aws.JSONValue: + if len(value) == 0 { + return "", errValueNotSet + } + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) + } + default: + err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + + return str, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 000000000..b54c99eda --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,54 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. 
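+// For illustration (not vendored source): given a shape whose "_" field is
+// tagged `payload:"Body"` and whose Body member is a structure, PayloadMember
+// returns that Body value; non-structure payloads return nil.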
+func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +const nopayloadPayloadType = "nopayload" + +// PayloadType returns the type of a payload field member of i if there is one, +// or "". +func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + + if field, ok := v.Type().FieldByName("_"); ok { + if noPayload := field.Tag.Get(nopayloadPayloadType); noPayload != "" { + return nopayloadPayloadType + } + + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + + return "" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 000000000..79fcf1699 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,276 @@ +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "math" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + awsStrings "github.com/aws/aws-sdk-go/internal/strings" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + if err := unmarshalBody(r, v); err != nil { + r.Error = err + } + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil { + r.Error = err + } + } +} + +// UnmarshalResponse attempts to unmarshal the REST response headers to +// the data type passed in. The type must be a pointer. An error is returned +// with any error unmarshaling the response into the target datatype. 
+func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error { + v := reflect.Indirect(reflect.ValueOf(data)) + return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps) +} + +func unmarshalBody(r *request.Request, v reflect.Value) error { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + payload.Set(reflect.ValueOf(b)) + + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + str := string(b) + payload.Set(reflect.ValueOf(&str)) + + default: + switch payload.Type().String() { + case "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + + case "io.ReadSeeker": + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, + "failed to read response body", err) + } + payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) + + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() + return awserr.New(request.ErrCodeSerialization, + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } + + return nil +} + +func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, resp.StatusCode) + + case "header": + err := unmarshalHeader(m, resp.Header.Get(name), field.Tag) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + } + } + } + + return nil +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error { + if len(headers) == 0 { + return nil + } + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + if awsStrings.HasPrefixFold(k, prefix) { + if normalize == true { + k = strings.ToLower(k) + } else { + k = http.CanonicalHeaderKey(k) + } + out[k[len(prefix):]] = &v[0] + } + } + if len(out) != 0 { + r.Set(reflect.ValueOf(out)) + } + + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string, tag 
reflect.StructTag) error { + switch tag.Get("type") { + case "jsonvalue": + if len(header) == 0 { + return nil + } + case "blob": + if len(header) == 0 { + return nil + } + default: + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + } + + switch v.Interface().(type) { + case *string: + if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" { + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return fmt.Errorf("failed to decode JSONValue, %v", err) + } + header = string(b) + } + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + var f float64 + switch { + case strings.EqualFold(header, floatNaN): + f = math.NaN() + case strings.EqualFold(header, floatInf): + f = math.Inf(1) + case strings.EqualFold(header, floatNegInf): + f = math.Inf(-1) + default: + var err error + f, err = strconv.ParseFloat(header, 64) + if err != nil { + return err + } + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + } + t, err := protocol.ParseTime(format, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + m, err := protocol.DecodeJSONValue(header, escaping) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go new file mode 100644 index 000000000..2e0e205af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go @@ -0,0 +1,59 @@ +// Package restjson provides RESTful JSON serialization of AWS +// requests and responses. +package restjson + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-json.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +// BuildHandler is a named request handler for building restjson protocol +// requests +var BuildHandler = request.NamedHandler{ + Name: "awssdk.restjson.Build", + Fn: Build, +} + +// UnmarshalHandler is a named request handler for unmarshaling restjson +// protocol requests +var UnmarshalHandler = request.NamedHandler{ + Name: "awssdk.restjson.Unmarshal", + Fn: Unmarshal, +} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restjson +// protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{ + Name: "awssdk.restjson.UnmarshalMeta", + Fn: UnmarshalMeta, +} + +// Build builds a request for the REST JSON protocol. 
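The headers-map branch above is what turns prefixed headers such as S3's x-amz-meta-* into a Go map, with the lowerCaseHeaderMaps flag controlling key normalization. A sketch under the same assumptions as before (hypothetical shape, made-up headers):

package main

import (
	"fmt"
	"net/http"

	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

// hypothetical shape exercising the "headers" (prefixed map) location
type metaOutput struct {
	_ struct{} `type:"structure"`

	Metadata map[string]*string `location:"headers" locationName:"X-Amz-Meta-" type:"map"`
}

func main() {
	resp := &http.Response{Header: http.Header{}}
	resp.Header.Set("X-Amz-Meta-Owner", "ops")
	resp.Header.Set("X-Amz-Meta-Env", "prod")

	var out metaOutput
	if err := rest.UnmarshalResponse(resp, &out, true); err != nil {
		panic(err)
	}
	// normalize=true lower-cases the keys before the prefix is stripped
	fmt.Println(*out.Metadata["owner"], *out.Metadata["env"]) // ops prod
}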
+func Build(r *request.Request) {
+	rest.Build(r)
+
+	if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
+		if v := r.HTTPRequest.Header.Get("Content-Type"); len(v) == 0 {
+			r.HTTPRequest.Header.Set("Content-Type", "application/json")
+		}
+		jsonrpc.Build(r)
+	}
+}
+
+// Unmarshal unmarshals a response body for the REST JSON protocol.
+func Unmarshal(r *request.Request) {
+	if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
+		jsonrpc.Unmarshal(r)
+	} else {
+		rest.Unmarshal(r)
+	}
+}
+
+// UnmarshalMeta unmarshals response headers for the REST JSON protocol.
+func UnmarshalMeta(r *request.Request) {
+	rest.UnmarshalMeta(r)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
new file mode 100644
index 000000000..5366a646d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
@@ -0,0 +1,157 @@
+package restjson
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+	"github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+	errorTypeHeader    = "X-Amzn-Errortype"
+	errorMessageHeader = "X-Amzn-Errormessage"
+)
+
+// UnmarshalTypedError provides unmarshaling of API response errors
+// for both typed and untyped errors.
+type UnmarshalTypedError struct {
+	exceptions map[string]func(protocol.ResponseMetadata) error
+}
+
+// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the
+// set of exception names to the error unmarshalers
+func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
+	return &UnmarshalTypedError{
+		exceptions: exceptions,
+	}
+}
+
+// UnmarshalError attempts to unmarshal the HTTP response error as a known
+// error type. If unable to unmarshal the error type, the generic SDK error
+// type will be used.
+func (u *UnmarshalTypedError) UnmarshalError(
+	resp *http.Response,
+	respMeta protocol.ResponseMetadata,
+) (error, error) {
+	code, msg, err := unmarshalErrorInfo(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	fn, ok := u.exceptions[code]
+	if !ok {
+		return awserr.NewRequestFailure(
+			awserr.New(code, msg, nil),
+			respMeta.StatusCode,
+			respMeta.RequestID,
+		), nil
+	}
+
+	v := fn(respMeta)
+	if err := jsonutil.UnmarshalJSONCaseInsensitive(v, resp.Body); err != nil {
+		return nil, err
+	}
+
+	if err := rest.UnmarshalResponse(resp, v, true); err != nil {
+		return nil, err
+	}
+
+	return v, nil
+}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling restjson
+// protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{
+	Name: "awssdk.restjson.UnmarshalError",
+	Fn:   UnmarshalError,
+}
+
+// UnmarshalError unmarshals a response error for the REST JSON protocol.
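NewUnmarshalTypedError above is how a generated client would register its modeled exceptions. A hedged sketch with a hypothetical exception type and a canned 404 response; the header and body handling follows the unmarshalErrorInfo rules implemented below:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol"
	"github.com/aws/aws-sdk-go/private/protocol/restjson"
)

// hypothetical modeled exception, shaped like generated service code
type resourceNotFound struct {
	RespMetadata protocol.ResponseMetadata
	Message_     *string `locationName:"message" type:"string"`
}

func (e *resourceNotFound) Error() string { return "ResourceNotFoundException" }

func main() {
	u := restjson.NewUnmarshalTypedError(map[string]func(protocol.ResponseMetadata) error{
		"ResourceNotFoundException": func(meta protocol.ResponseMetadata) error {
			return &resourceNotFound{RespMetadata: meta}
		},
	})

	resp := &http.Response{
		StatusCode: 404,
		Header:     http.Header{"X-Amzn-Errortype": []string{"ResourceNotFoundException"}},
		Body:       ioutil.NopCloser(strings.NewReader(`{"message":"no such resource"}`)),
	}
	meta := protocol.ResponseMetadata{StatusCode: 404, RequestID: "req-1"}

	v, err := u.UnmarshalError(resp, meta)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T: %s\n", v, *v.(*resourceNotFound).Message_)
	// *main.resourceNotFound: no such resource
}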
+func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + code, msg, err := unmarshalErrorInfo(r.HTTPResponse) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "failed to unmarshal response error", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + r.Error = awserr.NewRequestFailure( + awserr.New(code, msg, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) +} + +type jsonErrorResponse struct { + Type string `json:"__type"` + Code string `json:"code"` + Message string `json:"message"` +} + +func (j *jsonErrorResponse) SanitizedCode() string { + code := j.Code + if len(j.Type) > 0 { + code = j.Type + } + return sanitizeCode(code) +} + +// Remove superfluous components from a restJson error code. +// - If a : character is present, then take only the contents before the +// first : character in the value. +// - If a # character is present, then take only the contents after the first +// # character in the value. +// +// All of the following error values resolve to FooError: +// - FooError +// - FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/ +// - aws.protocoltests.restjson#FooError +// - aws.protocoltests.restjson#FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/ +func sanitizeCode(code string) string { + noColon := strings.SplitN(code, ":", 2)[0] + hashSplit := strings.SplitN(noColon, "#", 2) + return hashSplit[len(hashSplit)-1] +} + +// attempt to garner error details from the response, preferring header values +// when present +func unmarshalErrorInfo(resp *http.Response) (code string, msg string, err error) { + code = sanitizeCode(resp.Header.Get(errorTypeHeader)) + msg = resp.Header.Get(errorMessageHeader) + if len(code) > 0 && len(msg) > 0 { + return + } + + // a modeled error will have to be re-deserialized later, so the body must + // be preserved + var buf bytes.Buffer + tee := io.TeeReader(resp.Body, &buf) + defer func() { resp.Body = ioutil.NopCloser(&buf) }() + + var jsonErr jsonErrorResponse + if decodeErr := json.NewDecoder(tee).Decode(&jsonErr); decodeErr != nil && decodeErr != io.EOF { + err = awserr.NewUnmarshalError(decodeErr, "failed to decode response body", buf.Bytes()) + return + } + + if len(code) == 0 { + code = jsonErr.SanitizedCode() + } + if len(msg) == 0 { + msg = jsonErr.Message + } + return +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go new file mode 100644 index 000000000..b1ae36487 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -0,0 +1,79 @@ +// Package restxml provides RESTful XML serialization of AWS +// requests and responses. 
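sanitizeCode is unexported, so the following standalone copy of the rule (for illustration only) reproduces the four examples from the comment above:

package main

import (
	"fmt"
	"strings"
)

// standalone copy of the sanitize rule, for illustration only
func sanitizeCode(code string) string {
	noColon := strings.SplitN(code, ":", 2)[0]
	hashSplit := strings.SplitN(noColon, "#", 2)
	return hashSplit[len(hashSplit)-1]
}

func main() {
	for _, c := range []string{
		"FooError",
		"FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/",
		"aws.protocoltests.restjson#FooError",
		"aws.protocoltests.restjson#FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/",
	} {
		fmt.Println(sanitizeCode(c)) // FooError, four times
	}
}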
+package restxml
+
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-xml.json build_test.go
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
+
+import (
+	"bytes"
+	"encoding/xml"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+	"github.com/aws/aws-sdk-go/private/protocol/rest"
+	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// BuildHandler is a named request handler for building restxml protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build}
+
+// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError}
+
+// Build builds a request payload for the REST XML protocol.
+func Build(r *request.Request) {
+	rest.Build(r)
+
+	if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
+		var buf bytes.Buffer
+		err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
+		if err != nil {
+			r.Error = awserr.NewRequestFailure(
+				awserr.New(request.ErrCodeSerialization,
+					"failed to encode rest XML request", err),
+				0,
+				r.RequestID,
+			)
+			return
+		}
+		r.SetBufferBody(buf.Bytes())
+	}
+}
+
+// Unmarshal unmarshals a payload response for the REST XML protocol.
+func Unmarshal(r *request.Request) {
+	if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
+		defer r.HTTPResponse.Body.Close()
+		decoder := xml.NewDecoder(r.HTTPResponse.Body)
+		err := xmlutil.UnmarshalXML(r.Data, decoder, "")
+		if err != nil {
+			r.Error = awserr.NewRequestFailure(
+				awserr.New(request.ErrCodeSerialization,
+					"failed to decode REST XML response", err),
+				r.HTTPResponse.StatusCode,
+				r.RequestID,
+			)
+			return
+		}
+	} else {
+		rest.Unmarshal(r)
+	}
+}
+
+// UnmarshalMeta unmarshals response headers for the REST XML protocol.
+func UnmarshalMeta(r *request.Request) {
+	rest.UnmarshalMeta(r)
+}
+
+// UnmarshalError unmarshals a response error for the REST XML protocol.
+func UnmarshalError(r *request.Request) {
+	query.UnmarshalError(r)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
new file mode 100644
index 000000000..d9a4e7649
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
@@ -0,0 +1,134 @@
+package protocol
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/internal/sdkmath"
+)
+
+// Names of time formats supported by the SDK
+const (
+	RFC822TimeFormatName  = "rfc822"
+	ISO8601TimeFormatName = "iso8601"
+	UnixTimeFormatName    = "unixTimestamp"
+)
+
+// Time formats supported by the SDK
+// Output time is intended to not contain decimals
+const (
+	// RFC 7231#section-7.1.1.1 timestamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT
+	RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
+	rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT"
+	rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
+
+	// This format is used for output time without seconds precision
+	RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
+
+	// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
+	ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z"
+	iso8601TimeFormatNoZ = "2006-01-02T15:04:05.999999999"
+
+	// This format is used for output time with fractional second precision up to milliseconds
+	ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z"
+)
+
+// IsKnownTimestampFormat returns if the timestamp format name
+// is known to the SDK's protocols.
+func IsKnownTimestampFormat(name string) bool {
+	switch name {
+	case RFC822TimeFormatName:
+		fallthrough
+	case ISO8601TimeFormatName:
+		fallthrough
+	case UnixTimeFormatName:
+		return true
+	default:
+		return false
+	}
+}
+
+// FormatTime returns a string value of the time.
+func FormatTime(name string, t time.Time) string {
+	t = t.UTC().Truncate(time.Millisecond)
+
+	switch name {
+	case RFC822TimeFormatName:
+		return t.Format(RFC822OutputTimeFormat)
+	case ISO8601TimeFormatName:
+		return t.Format(ISO8601OutputTimeFormat)
+	case UnixTimeFormatName:
+		ms := t.UnixNano() / int64(time.Millisecond)
+		return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64)
+	default:
+		panic("unknown timestamp format name, " + name)
+	}
+}
+
+// ParseTime attempts to parse the time given the format. Returns
+// the time if it was able to be parsed, and fails otherwise.
+func ParseTime(formatName, value string) (time.Time, error) {
+	switch formatName {
+	case RFC822TimeFormatName: // Smithy HTTPDate format
+		return tryParse(value,
+			RFC822TimeFormat,
+			rfc822TimeFormatSingleDigitDay,
+			rfc822TimeFormatSingleDigitDayTwoDigitYear,
+			time.RFC850,
+			time.ANSIC,
+		)
+	case ISO8601TimeFormatName: // Smithy DateTime format
+		return tryParse(value,
+			ISO8601TimeFormat,
+			iso8601TimeFormatNoZ,
+			time.RFC3339Nano,
+			time.RFC3339,
+		)
+	case UnixTimeFormatName:
+		v, err := strconv.ParseFloat(value, 64)
+		_, dec := math.Modf(v)
+		dec = sdkmath.Round(dec*1e3) / 1e3 // Rounds 0.1229999 to 0.123
+		if err != nil {
+			return time.Time{}, err
+		}
+		return time.Unix(int64(v), int64(dec*(1e9))), nil
+	default:
+		panic("unknown timestamp format name, " + formatName)
+	}
+}
+
+func tryParse(v string, formats ...string) (time.Time, error) {
+	var errs parseErrors
+	for _, f := range formats {
+		t, err := time.Parse(f, v)
+		if err != nil {
+			errs = append(errs, parseError{
+				Format: f,
+				Err:    err,
+			})
+			continue
+		}
+		return t, nil
+	}
+
+	return time.Time{}, fmt.Errorf("unable to parse time string, %v", errs)
+}
+
+type parseErrors []parseError
+
+func (es parseErrors) Error() string {
+	var s bytes.Buffer
+	for _, e := range es {
+		fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err)
+	}
+
+	return "parse errors:" + s.String()
+}
+
+type parseError struct {
+	Format string
+	Err    error
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
new file mode 100644
index 000000000..f614ef898
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
@@ -0,0 +1,27 @@
+package protocol
+
+import (
+	"io"
+	"io/ioutil"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
+var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
+
+// UnmarshalDiscardBody is a request handler to empty a response's body and close it.
+func UnmarshalDiscardBody(r *request.Request) {
+	if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
+		return
+	}
+
+	io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+	r.HTTPResponse.Body.Close()
+}
+
+// ResponseMetadata provides the SDK response metadata attributes.
+type ResponseMetadata struct {
+	StatusCode int
+	RequestID  string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go
new file mode 100644
index 000000000..cc857f136
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go
@@ -0,0 +1,65 @@
+package protocol
+
+import (
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalErrorHandler provides unmarshaling of API response errors for
+// both typed and untyped errors.
+type UnmarshalErrorHandler struct {
+	unmarshaler ErrorUnmarshaler
+}
+
+// ErrorUnmarshaler is an abstract interface for concrete implementations to
+// unmarshal protocol specific response errors.
+type ErrorUnmarshaler interface {
+	UnmarshalError(*http.Response, ResponseMetadata) (error, error)
+}
+
+// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler
+// initialized for the set of exception names to the error unmarshalers
+func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler {
+	return &UnmarshalErrorHandler{
+		unmarshaler: unmarshaler,
+	}
+}
+
+// UnmarshalErrorHandlerName is the name of the named handler.
+const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError"
+
+// NamedHandler returns a NamedHandler for the unmarshaler using the set of
+// errors the unmarshaler was initialized for.
+func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler {
+	return request.NamedHandler{
+		Name: UnmarshalErrorHandlerName,
+		Fn:   u.UnmarshalError,
+	}
+}
+
+// UnmarshalError will attempt to unmarshal the API response's error message
+// into either a generic SDK error type, or a typed error corresponding to the
+// errors exception name.
+func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	respMeta := ResponseMetadata{
+		StatusCode: r.HTTPResponse.StatusCode,
+		RequestID:  r.RequestID,
+	}
+
+	v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta)
+	if err != nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to unmarshal response error", err),
+			respMeta.StatusCode,
+			respMeta.RequestID,
+		)
+		return
+	}
+
+	r.Error = v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
new file mode 100644
index 000000000..58c12bd8c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -0,0 +1,345 @@
+// Package xmlutil provides XML serialization of AWS requests and responses.
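The timestamp helpers vendored above are exported, so their truncation and format-name behavior can be exercised directly. A minimal sketch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	t, err := protocol.ParseTime(protocol.ISO8601TimeFormatName, "2014-04-29T18:30:38.123Z")
	if err != nil {
		panic(err)
	}

	// FormatTime truncates to millisecond precision before formatting.
	fmt.Println(protocol.FormatTime(protocol.RFC822TimeFormatName, t))
	// Tue, 29 Apr 2014 18:30:38 GMT
	fmt.Println(protocol.FormatTime(protocol.UnixTimeFormatName, t))
	// 1398796238.123
}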
+package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + +// BuildXML will serialize params into an xml.Encoder. Error will be returned +// if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + return buildXML(params, e, false) +} + +func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, sorted) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided reflect will be used to determine the value's type. +func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("_"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested +// types are converted to XMLNodes also. 
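A quick sketch of the builder in action may help; the shape below is hypothetical but carries the same locationName/locationNameList/type tags the generated models use. The printed output leans on the builder's handling of anonymous wrapper nodes, so treat it as illustrative:

package main

import (
	"bytes"
	"encoding/xml"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

// hypothetical input shape using the model tags the builder understands
type configInput struct {
	_ struct{} `locationName:"Configuration" type:"structure"`

	Tags []*string `locationName:"Tags" locationNameList:"Tag" type:"list"`
}

func main() {
	a, b := "alpha", "beta"
	in := &configInput{Tags: []*string{&a, &b}}

	var buf bytes.Buffer
	if err := xmlutil.BuildXML(in, xml.NewEncoder(&buf)); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
	// <Configuration><Tags><Tag>alpha</Tag><Tag>beta</Tag></Tags></Configuration>
}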
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + var payloadFields, nonPayloadFields int + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + mTag := field.Tag + if mTag.Get("location") != "" { // skip non-body members + nonPayloadFields++ + continue + } + payloadFields++ + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + } + + // Only case where the child shape is not added is if the shape only contains + // non-payload fields, e.g headers/query. + if !(payloadFields == 0 && nonPayloadFields > 0) { + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also. +func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. 
+// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. +func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + switch { + case math.IsNaN(converted): + str = floatNaN + case math.IsInf(converted, 1): + str = floatInf + case math.IsInf(converted, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(converted, 'f', -1, 64) + } + case float32: + // The SDK doesn't render float32 values in types, only float64. This case would never be hit currently. 
+ asFloat64 := float64(converted) + switch { + case math.IsNaN(asFloat64): + str = floatNaN + case math.IsInf(asFloat64, 1): + str = floatInf + case math.IsInf(asFloat64, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(asFloat64, 'f', -1, 32) + } + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + str = protocol.FormatTime(format, converted) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else if len(xname.Local) == 0 { + current.Text = str + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go new file mode 100644 index 000000000..c1a511851 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go @@ -0,0 +1,32 @@ +package xmlutil + +import ( + "encoding/xml" + "strings" +) + +type xmlAttrSlice []xml.Attr + +func (x xmlAttrSlice) Len() int { + return len(x) +} + +func (x xmlAttrSlice) Less(i, j int) bool { + spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space + localI, localJ := x[i].Name.Local, x[j].Name.Local + valueI, valueJ := x[i].Value, x[j].Value + + spaceCmp := strings.Compare(spaceI, spaceJ) + localCmp := strings.Compare(localI, localJ) + valueCmp := strings.Compare(valueI, valueJ) + + if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { + return true + } + + return false +} + +func (x xmlAttrSlice) Swap(i, j int) { + x[i], x[j] = x[j], x[i] +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 000000000..44a580a94 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,311 @@ +package xmlutil + +import ( + "bytes" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalXMLError unmarshals the XML error from the stream into the value +// type specified. The value must be a pointer. If the message fails to +// unmarshal, the message content will be included in the returned error as a +// awserr.UnmarshalError. +func UnmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return nil +} + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. 
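UnmarshalXMLError above decodes with plain encoding/xml tags while preserving the raw bytes for the returned awserr.UnmarshalError. A sketch with a hypothetical error document:

package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

// hypothetical error document shape
type apiError struct {
	Code    string `xml:"Code"`
	Message string `xml:"Message"`
}

func main() {
	body := strings.NewReader(
		`<Error><Code>NoSuchKey</Code><Message>The key does not exist</Message></Error>`)

	var e apiError
	if err := xmlutil.UnmarshalXMLError(&e, body); err != nil {
		panic(err)
	}
	fmt.Println(e.Code, e.Message) // NoSuchKey The key does not exist
}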
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, err := XMLToStruct(d, nil) + if err != nil { + return err + } + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err = parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := r.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := r.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. +func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + if val, ok := node.findElem(name); ok { + elems = []*XMLNode{{Text: val}} + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. 
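UnmarshalXML above drives the parse functions that follow. A sketch that decodes a made-up document into a hypothetical shape tagged like the generated models:

package main

import (
	"encoding/xml"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

// hypothetical result shape using the model tags parse understands
type listResult struct {
	_ struct{} `type:"structure"`

	Name *string   `locationName:"Name" type:"string"`
	Keys []*string `locationName:"Keys" locationNameList:"Key" type:"list"`
}

func main() {
	doc := `<ListResult><Name>demo</Name><Keys><Key>a</Key><Key>b</Key></Keys></ListResult>`

	var out listResult
	if err := xmlutil.UnmarshalXML(&out, xml.NewDecoder(strings.NewReader(doc)), ""); err != nil {
		panic(err)
	}
	fmt.Println(*out.Name, len(out.Keys)) // demo 2
}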
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+
+	if tag.Get("flattened") == "" { // look at all item entries
+		mname := "member"
+		if name := tag.Get("locationNameList"); name != "" {
+			mname = name
+		}
+
+		if Children, ok := node.Children[mname]; ok {
+			if r.IsNil() {
+				r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
+			}
+
+			for i, c := range Children {
+				err := parse(r.Index(i), c, "")
+				if err != nil {
+					return err
+				}
+			}
+		}
+	} else { // flattened list means this is a single element
+		if r.IsNil() {
+			r.Set(reflect.MakeSlice(t, 0, 0))
+		}
+
+		childR := reflect.Zero(t.Elem())
+		r.Set(reflect.Append(r, childR))
+		err := parse(r.Index(r.Len()-1), node, "")
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	if r.IsNil() {
+		r.Set(reflect.MakeMap(r.Type()))
+	}
+
+	if tag.Get("flattened") == "" { // look at all child entries
+		for _, entry := range node.Children["entry"] {
+			parseMapEntry(r, entry, tag)
+		}
+	} else { // this element is itself an entry
+		parseMapEntry(r, node, tag)
+	}
+
+	return nil
+}
+
+// parseMapEntry deserializes a map entry from an XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	keys, ok := node.Children[kname]
+	values := node.Children[vname]
+	if ok {
+		for i, key := range keys {
+			keyR := reflect.ValueOf(key.Text)
+			value := values[i]
+			valueR := reflect.New(r.Type().Elem()).Elem()
+
+			parse(valueR, value, "")
+			r.SetMapIndex(keyR, valueR)
+		}
+	}
+	return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// Error is returned if the deserialization fails due to invalid type conversion,
+// or unsupported interface type.
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	switch r.Interface().(type) {
+	case *string:
+		r.Set(reflect.ValueOf(&node.Text))
+		return nil
+	case []byte:
+		b, err := base64.StdEncoding.DecodeString(node.Text)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(b))
+	case *bool:
+		v, err := strconv.ParseBool(node.Text)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(&v))
+	case *int64:
+		v, err := strconv.ParseInt(node.Text, 10, 64)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(&v))
+	case *float64:
+		var v float64
+		switch {
+		case strings.EqualFold(node.Text, floatNaN):
+			v = math.NaN()
+		case strings.EqualFold(node.Text, floatInf):
+			v = math.Inf(1)
+		case strings.EqualFold(node.Text, floatNegInf):
+			v = math.Inf(-1)
+		default:
+			var err error
+			v, err = strconv.ParseFloat(node.Text, 64)
+			if err != nil {
+				return err
+			}
+		}
+		r.Set(reflect.ValueOf(&v))
+	case *time.Time:
+		format := tag.Get("timestampFormat")
+		if len(format) == 0 {
+			format = protocol.ISO8601TimeFormatName
+		}
+
+		t, err := protocol.ParseTime(format, node.Text)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(&t))
+	default:
+		return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
new file mode 100644
index 000000000..c85b79fdd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -0,0 +1,173 @@
+package xmlutil
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+	"sort"
+)
+
+// An XMLNode contains the values to be encoded or decoded.
+type XMLNode struct {
+	Name     xml.Name              `json:",omitempty"`
+	Children map[string][]*XMLNode `json:",omitempty"`
+	Text     string                `json:",omitempty"`
+	Attr     []xml.Attr            `json:",omitempty"`
+
+	namespaces map[string]string
+	parent     *XMLNode
+}
+
+// textEncoder is a string type alias that implements the TextMarshaler interface.
+// This alias type is used to ensure that the line feed (\n) (U+000A) is escaped.
+type textEncoder string
+
+func (t textEncoder) MarshalText() ([]byte, error) {
+	return []byte(t), nil
+}
+
+// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
+func NewXMLElement(name xml.Name) *XMLNode {
+	return &XMLNode{
+		Name:     name,
+		Children: map[string][]*XMLNode{},
+		Attr:     []xml.Attr{},
+	}
+}
+
+// AddChild adds child to the XMLNode.
+func (n *XMLNode) AddChild(child *XMLNode) {
+	child.parent = n
+	if _, ok := n.Children[child.Name.Local]; !ok {
+		n.Children[child.Name.Local] = []*XMLNode{}
+	}
+	n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
+}
+
+// XMLToStruct converts an xml.Decoder stream to XMLNode with nested values.
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if err != nil { + if err == io.EOF { + break + } else { + return out, err + } + } + + if tok == nil { + break + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + out.findNamespaces() + if e != nil { + return out, e + } + node.Name = typed.Name + node.findNamespaces() + tempOut := *out + // Save into a temp variable, simply because out gets squashed during + // loop iterations + node.parent = &tempOut + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + out = &XMLNode{} + } + } + return out, nil +} + +func (n *XMLNode) findNamespaces() { + ns := map[string]string{} + for _, a := range n.Attr { + if a.Name.Space == "xmlns" { + ns[a.Value] = a.Name.Local + } + } + + n.namespaces = ns +} + +func (n *XMLNode) findElem(name string) (string, bool) { + for node := n; node != nil; node = node.parent { + for _, a := range node.Attr { + namespace := a.Name.Space + if v, ok := node.namespaces[namespace]; ok { + namespace = v + } + if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { + return a.Value, true + } + } + } + return "", false +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. +func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + // Sort Attributes + attrs := node.Attr + if sorted { + sortedAttrs := make([]xml.Attr, len(attrs)) + for _, k := range node.Attr { + sortedAttrs = append(sortedAttrs, k) + } + sort.Sort(xmlAttrSlice(sortedAttrs)) + attrs = sortedAttrs + } + + startElement := xml.StartElement{Name: node.Name, Attr: attrs} + + if node.Text != "" { + e.EncodeElement(textEncoder(node.Text), startElement) + return e.Flush() + } + + e.EncodeToken(startElement) + + if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(startElement.End()) + + return e.Flush() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go new file mode 100644 index 000000000..f1fa8dcf0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -0,0 +1,45973 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
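XMLToStruct and StructToXML above are the two exported halves of the node tree; a small round trip shows the tree being rebuilt, using sorted=true for deterministic output (the document contents are made up):

package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

func main() {
	in := `<Config><Name>demo</Name></Config>`

	node, err := xmlutil.XMLToStruct(xml.NewDecoder(strings.NewReader(in)), nil)
	if err != nil {
		panic(err)
	}

	// The parsed tree is keyed by element name; re-encode the root element.
	var buf bytes.Buffer
	enc := xml.NewEncoder(&buf)
	if err := xmlutil.StructToXML(enc, node.Children["Config"][0], true); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // <Config><Name>demo</Name></Config>
}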
+ +package s3 + +import ( + "bytes" + "fmt" + "io" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/private/checksum" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" + "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the AbortMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AbortMultipartUpload for more information on using the AbortMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the AbortMultipartUploadRequest method. +// req, resp := client.AbortMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + output = &AbortMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// AbortMultipartUpload API operation for Amazon Simple Storage Service. +// +// This operation aborts a multipart upload. After a multipart upload is aborted, +// no additional parts can be uploaded using that upload ID. The storage consumed +// by any previously uploaded parts will be freed. However, if any part uploads +// are currently in progress, those part uploads might or might not succeed. +// As a result, it might be necessary to abort a given multipart upload multiple +// times in order to completely free all storage consumed by all parts. +// +// To verify that all parts have been removed and prevent getting charged for +// the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// API operation and ensure that the parts list is empty. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. 
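As a usage sketch for the operation documented above (region, bucket, key and upload ID are placeholders; error handling trimmed for brevity):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := s3.New(sess)

	_, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
		Bucket:   aws.String("example-bucket"),
		Key:      aws.String("large-object"),
		UploadId: aws.String("example-upload-id"),
	})
	if err != nil {
		fmt.Println("abort failed:", err)
		return
	}

	// Per the note above, a follow-up ListParts call with the same upload ID
	// can confirm that no previously uploaded parts remain billable.
}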
+// +// Permissions +// +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload, see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following operations are related to AbortMultipartUpload: +// +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation AbortMultipartUpload for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchUpload "NoSuchUpload" +// The specified multipart upload does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + return out, req.Send() +} + +// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See AbortMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CompleteMultipartUploadRequest method. +// req, resp := client.CompleteMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + output = &CompleteMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CompleteMultipartUpload API operation for Amazon Simple Storage Service. +// +// Completes a multipart upload by assembling previously uploaded parts. +// +// You first initiate the multipart upload and then upload all parts using the +// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// operation or the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// operation. After successfully uploading all relevant parts of an upload, +// you call this CompleteMultipartUpload operation to complete the upload. Upon +// receiving this request, Amazon S3 concatenates all the parts in ascending +// order by part number to create a new object. In the CompleteMultipartUpload +// request, you must provide the parts list and ensure that the parts list is +// complete. The CompleteMultipartUpload API operation concatenates the parts +// that you provide in the list. For each part in the list, you must provide +// the PartNumber value and the ETag value that are returned after that part +// was uploaded. +// +// The processing of a CompleteMultipartUpload request could take several minutes +// to finalize. After Amazon S3 begins processing the request, it sends an HTTP +// response header that specifies a 200 OK response. While processing is in +// progress, Amazon S3 periodically sends white space characters to keep the +// connection from timing out. A request could fail after the initial 200 OK +// response has been sent. This means that a 200 OK response can contain either +// a success or an error. The error response might be embedded in the 200 OK +// response. If you call this API operation directly, make sure to design your +// application to parse the contents of the response and handle it appropriately. +// If you use Amazon Web Services SDKs, SDKs handle this condition. 
The SDKs +// detect the embedded error and apply error handling per your configuration +// settings (including automatically retrying the request as appropriate). If +// the condition persists, the SDKs throw an exception (or, for the SDKs that +// don't use exceptions, they return an error). +// +// Note that if CompleteMultipartUpload fails, applications should be prepared +// to retry any failed requests (including 500 error responses). For more information, +// see Amazon S3 Error Best Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). +// +// You can't use Content-Type: application/x-www-form-urlencoded for the CompleteMultipartUpload +// requests. Also, if you don't provide a Content-Type header, CompleteMultipartUpload +// can still return a 200 OK response. +// +// For more information about multipart uploads, see Uploading Objects Using +// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// Special errors +// +// - Error Code: EntityTooSmall Description: Your proposed upload is smaller +// than the minimum allowed object size. Each part must be at least 5 MB +// in size, except the last part. HTTP Status Code: 400 Bad Request +// +// - Error Code: InvalidPart Description: One or more of the specified parts +// could not be found. The part might not have been uploaded, or the specified +// ETag might not have matched the uploaded part's ETag. HTTP Status Code: +// 400 Bad Request +// +// - Error Code: InvalidPartOrder Description: The list of parts was not +// in ascending order. The parts list must be specified in order by part +// number. 
HTTP Status Code: 400 Bad Request +// +// - Error Code: NoSuchUpload Description: The specified multipart upload +// does not exist. The upload ID might be invalid, or the multipart upload +// might have been aborted or completed. HTTP Status Code: 404 Not Found +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following operations are related to CompleteMultipartUpload: +// +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CompleteMultipartUpload for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + return out, req.Send() +} + +// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCopyObject = "CopyObject" + +// CopyObjectRequest generates a "aws/request.Request" representing the +// client's request for the CopyObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CopyObject for more information on using the CopyObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CopyObjectRequest method. 
+// req, resp := client.CopyObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { + op := &request.Operation{ + Name: opCopyObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CopyObjectInput{} + } + + output = &CopyObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopyObject API operation for Amazon Simple Storage Service. +// +// Creates a copy of an object that is already stored in Amazon S3. +// +// You can store individual objects of up to 5 TB in Amazon S3. You create a +// copy of your object up to 5 GB in size in a single atomic action using this +// API. However, to copy an object greater than 5 GB, you must use the multipart +// upload Upload Part - Copy (UploadPartCopy) API. For more information, see +// Copy Object Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). +// +// You can copy individual objects between general purpose buckets, between +// directory buckets, and between general purpose buckets and directory buckets. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Both the Region that you want to copy the object from and the Region that +// you want to copy the object to must be enabled for your account. For more +// information about how to enable a Region for your account, see Enable or +// disable a Region for standalone accounts (https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone) +// in the Amazon Web Services Account Management Guide. +// +// Amazon S3 transfer acceleration does not support cross-Region copies. If +// you request a cross-Region copy using a transfer acceleration endpoint, you +// get a 400 Bad Request error. For more information, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// # Authentication and authorization +// +// All CopyObject requests must be authenticated and signed by using IAM credentials +// (access key ID and secret access key for the IAM identities). All headers +// with the x-amz- prefix, including x-amz-copy-source, must be signed. For +// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). +// +// Directory buckets - You must use the IAM credentials to authenticate and +// authorize your access to the CopyObject API operation, instead of using the +// temporary security credentials through the CreateSession API operation. +// +// Amazon Web Services CLI or SDKs handles authentication and authorization +// on your behalf. +// +// # Permissions +// +// You must have read access to the source object and write access to the destination +// bucket. 
+// +// - General purpose bucket permissions - You must have permissions in an +// IAM policy based on the source and destination bucket types in a CopyObject +// operation. If the source object is in a general purpose bucket, you must +// have s3:GetObject permission to read the source object that is being copied. +// If the destination bucket is a general purpose bucket, you must have s3:PutObject +// permission to write the object copy to the destination bucket. +// +// - Directory bucket permissions - You must have permissions in a bucket +// policy or an IAM identity-based policy based on the source and destination +// bucket types in a CopyObject operation. If the source object that you +// want to copy is in a directory bucket, you must have the s3express:CreateSession +// permission in the Action element of a policy to read the object. By default, +// the session is in the ReadWrite mode. If you want to restrict the access, +// you can explicitly set the s3express:SessionMode condition key to ReadOnly +// on the copy source bucket. If the copy destination is a directory bucket, +// you must have the s3express:CreateSession permission in the Action element +// of a policy to write the object to the destination. The s3express:SessionMode +// condition key can't be set to ReadOnly on the copy destination bucket. +// For example policies, see Example bucket policies for S3 Express One Zone +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. +// +// # Response and special errors +// +// When the request is an HTTP 1.1 request, the response is chunk encoded. When +// the request is not an HTTP 1.1 request, the response would not contain the +// Content-Length. You always need to read the entire response body to check +// if the copy succeeds. +// +// - If the copy is successful, you receive a response with information about +// the copied object. +// +// - A copy request might return an error when Amazon S3 receives the copy +// request or while Amazon S3 is copying the files. A 200 OK response can +// contain either a success or an error. If the error occurs before the copy +// action starts, you receive a standard Amazon S3 error. If the error occurs +// during the copy operation, the error response is embedded in the 200 OK +// response. For example, in a cross-region copy, you may encounter throttling +// and receive a 200 OK response. For more information, see Resolve the Error +// 200 response when copying objects to Amazon S3 (https://repost.aws/knowledge-center/s3-resolve-200-internalerror). +// The 200 OK status code means the copy was accepted, but it doesn't mean +// the copy is complete. Another example is when you disconnect from Amazon +// S3 before the copy is complete, Amazon S3 might cancel the copy and you +// may receive a 200 OK response. You must stay connected to Amazon S3 until +// the entire response is successfully received and processed. If you call +// this API operation directly, make sure to design your application to parse +// the content of the response and handle it appropriately. If you use Amazon +// Web Services SDKs, SDKs handle this condition. 
The SDKs detect the embedded +// error and apply error handling per your configuration settings (including +// automatically retrying the request as appropriate). If the condition persists, +// the SDKs throw an exception (or, for the SDKs that don't use exceptions, +// they return an error). +// +// # Charge +// +// The copy request charge is based on the storage class and Region that you +// specify for the destination object. The request can also result in a data +// retrieval charge for the source if the source storage class bills for data +// retrieval. If the copy source is in a different region, the data transfer +// is billed to the copy source account. For pricing information, see Amazon +// S3 pricing (http://aws.amazon.com/s3/pricing/). +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following operations are related to CopyObject: +// +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CopyObject for usage and error information. +// +// Returned Error Codes: +// - ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" +// The source object of the COPY action is not in the active tier and is only +// stored in Amazon S3 Glacier. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + return out, req.Send() +} + +// CopyObjectWithContext is the same as CopyObject with the addition of +// the ability to pass a context and additional request options. +// +// See CopyObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a "aws/request.Request" representing the +// client's request for the CreateBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBucket for more information on using the CreateBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateBucketRequest method. 
+// req, resp := client.CreateBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &CreateBucketInput{} + } + + output = &CreateBucketOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateBucket API operation for Amazon Simple Storage Service. +// +// This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts +// bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html). +// +// Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and +// have a valid Amazon Web Services Access Key ID to authenticate requests. +// Anonymous requests are never allowed to create buckets. By creating the bucket, +// you become the bucket owner. +// +// There are two types of buckets: general purpose buckets and directory buckets. +// For more information about these bucket types, see Creating, configuring, +// and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) +// in the Amazon S3 User Guide. +// +// - General purpose buckets - If you send your CreateBucket request to the +// s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. +// So the signature calculations in Signature Version 4 must use us-east-1 +// as the Region, even if the location constraint in the request specifies +// another Region where the bucket is to be created. If you create a bucket +// in a Region other than US East (N. Virginia), your application must be +// able to handle 307 redirect. For more information, see Virtual hosting +// of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html) +// in the Amazon S3 User Guide. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, +// see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - In addition to the s3:CreateBucket +// permission, the following permissions are required in a policy when your +// CreateBucket request includes specific headers: Access control lists (ACLs) +// +// - In your CreateBucket request, if you specify an access control list +// (ACL) and set it to public-read, public-read-write, authenticated-read, +// or if you explicitly specify any other custom ACLs, both s3:CreateBucket +// and s3:PutBucketAcl permissions are required. In your CreateBucket request, +// if you set the ACL to private, or if you don't specify any ACLs, only +// the s3:CreateBucket permission is required. Object Lock - In your CreateBucket +// request, if you set x-amz-bucket-object-lock-enabled to true, the s3:PutBucketObjectLockConfiguration +// and s3:PutBucketVersioning permissions are required. 
S3 Object Ownership +// +// - If your CreateBucket request includes the x-amz-object-ownership header, +// then the s3:PutBucketOwnershipControls permission is required. To set +// an ACL on a bucket as part of a CreateBucket request, you must explicitly +// set S3 Object Ownership for the bucket to a different value than the default, +// BucketOwnerEnforced. Additionally, if your desired bucket ACL grants public +// access, you must first create the bucket (without the bucket ACL) and +// then explicitly disable Block Public Access on the bucket before using +// PutBucketAcl to set the ACL. If you try to create a bucket with a public +// ACL, the request will fail. For the majority of modern use cases in S3, +// we recommend that you keep all Block Public Access settings enabled and +// keep ACLs disabled. If you would like to share data with users outside +// of your account, you can use bucket policies as needed. For more information, +// see Controlling ownership of objects and disabling ACLs for your bucket +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// and Blocking public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) +// in the Amazon S3 User Guide. S3 Block Public Access - If your specific +// use case requires granting public access to your S3 resources, you can +// disable Block Public Access. Specifically, you can create a new bucket +// with Block Public Access enabled, then separately call the DeletePublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// API. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about S3 Block Public Access, see Blocking +// public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - You must have the s3express:CreateBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation +// can only be performed by the Amazon Web Services account that owns the +// resource. For more information about directory bucket policies and permissions, +// see Amazon Web Services Identity and Access Management (IAM) for S3 Express +// One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. The permissions for ACLs, Object Lock, S3 +// Object Ownership, and S3 Block Public Access are not supported for directory +// buckets. For directory buckets, all Block Public Access settings are enabled +// at the bucket level and S3 Object Ownership is set to Bucket owner enforced +// (ACLs disabled). These settings can't be modified. For more information +// about permissions for creating and working with directory buckets, see +// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. For more information about supported S3 features +// for directory buckets, see Features of S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features) +// in the Amazon S3 User Guide. 
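+//
+// The following is an editor's illustrative sketch, not part of the generated
+// SDK documentation: creating a general purpose bucket outside us-east-1 and
+// checking the error codes documented below. The bucket name and Region are
+// placeholder assumptions, and client is assumed to be an *s3.S3.
+//
+//	_, err := client.CreateBucket(&s3.CreateBucketInput{
+//	    Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder name
+//	    CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+//	        // Required when creating the bucket outside us-east-1.
+//	        LocationConstraint: aws.String("eu-west-1"), // placeholder Region
+//	    },
+//	})
+//	if aerr, ok := err.(awserr.Error); ok {
+//	    switch aerr.Code() {
+//	    case s3.ErrCodeBucketAlreadyExists, s3.ErrCodeBucketAlreadyOwnedByYou:
+//	        // The name is taken, or you already own this bucket.
+//	    }
+//	}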
+// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. +// +// The following operations are related to CreateBucket: +// +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateBucket for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeBucketAlreadyExists "BucketAlreadyExists" +// The requested bucket name is not available. The bucket namespace is shared +// by all users of the system. Select a different name and try again. +// +// - ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" +// The bucket you tried to create already exists, and you own it. Amazon S3 +// returns this error in all Amazon Web Services Regions except in the North +// Virginia Region. For legacy compatibility, if you re-create an existing bucket +// that you already own in the North Virginia Region, Amazon S3 returns 200 +// OK and resets the bucket access control lists (ACLs). +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + return out, req.Send() +} + +// CreateBucketWithContext is the same as CreateBucket with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateMultipartUpload = "CreateMultipartUpload" + +// CreateMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CreateMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateMultipartUpload for more information on using the CreateMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateMultipartUploadRequest method. 
+// req, resp := client.CreateMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { + op := &request.Operation{ + Name: opCreateMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?uploads", + } + + if input == nil { + input = &CreateMultipartUploadInput{} + } + + output = &CreateMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateMultipartUpload API operation for Amazon Simple Storage Service. +// +// This action initiates a multipart upload and returns an upload ID. This upload +// ID is used to associate all of the parts in the specific multipart upload. +// You specify this upload ID in each of your subsequent upload part requests +// (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). +// You also include this upload ID in the final request to either complete or +// abort the multipart upload request. For more information about multipart +// uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) +// in the Amazon S3 User Guide. +// +// After you initiate a multipart upload and upload one or more parts, to stop +// being charged for storing the uploaded parts, you must either complete or +// abort the multipart upload. Amazon S3 frees up the space used to store the +// parts and stops charging you for storing them only after you either complete +// or abort a multipart upload. +// +// If you have configured a lifecycle rule to abort incomplete multipart uploads, +// the created multipart upload must be completed within the number of days +// specified in the bucket lifecycle configuration. Otherwise, the incomplete +// multipart upload becomes eligible for an abort action and Amazon S3 aborts +// the multipart upload. For more information, see Aborting Incomplete Multipart +// Uploads Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). +// +// - Directory buckets - S3 Lifecycle is not supported by directory buckets. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Request signing +// +// For request signing, multipart upload is just a series of regular requests. +// You initiate a multipart upload, send one or more requests to upload parts, +// and then complete the multipart upload process. You sign each request individually. +// There is nothing special about signing multipart upload requests. For more +// information about signing, see Authenticating Requests (Amazon Web Services +// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) +// in the Amazon S3 User Guide. 
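+//
+// As an editor's illustrative sketch (not part of the generated SDK
+// documentation), the individually signed requests of a minimal multipart
+// upload look like the following. The bucket name, object key, and payload
+// ([]byte, with the bytes package imported) are placeholder assumptions, and
+// client is assumed to be an *s3.S3.
+//
+//	create, err := client.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+//	    Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
+//	    Key:    aws.String("example-object"),      // placeholder
+//	})
+//	// ... handle err ...
+//
+//	// Upload a single part; each part request is signed on its own.
+//	part, err := client.UploadPart(&s3.UploadPartInput{
+//	    Bucket:     aws.String("amzn-s3-demo-bucket"),
+//	    Key:        aws.String("example-object"),
+//	    UploadId:   create.UploadId,
+//	    PartNumber: aws.Int64(1),
+//	    Body:       bytes.NewReader(payload),
+//	})
+//	// ... handle err ...
+//
+//	// Complete the upload with the collected part number and ETag.
+//	_, err = client.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//	    Bucket:   aws.String("amzn-s3-demo-bucket"),
+//	    Key:      aws.String("example-object"),
+//	    UploadId: create.UploadId,
+//	    MultipartUpload: &s3.CompletedMultipartUpload{
+//	        Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
+//	    },
+//	})
+//	// ... handle err ...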
+// +// Permissions +// +// - General purpose bucket permissions - For information about the permissions +// required to use the multipart upload API, see Multipart upload and permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. To perform a multipart upload with encryption +// by using an Amazon Web Services KMS key, the requester must have permission +// to the kms:Decrypt and kms:GenerateDataKey* actions on the key. These +// permissions are required because Amazon S3 must decrypt and read data +// from the encrypted file parts before it completes the multipart upload. +// For more information, see Multipart upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// and Protecting data using server-side encryption with Amazon Web Services +// KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// Encryption +// +// - General purpose buckets - Server-side encryption is for data encryption +// at rest. Amazon S3 encrypts your data as it writes it to disks in its +// data centers and decrypts it when you access it. Amazon S3 automatically +// encrypts all new objects that are uploaded to an S3 bucket. When doing +// a multipart upload, if you don't specify encryption information in your +// request, the encryption setting of the uploaded parts is set to the default +// encryption configuration of the destination bucket. By default, all buckets +// have a base level of encryption configuration that uses server-side encryption +// with Amazon S3 managed keys (SSE-S3). If the destination bucket has a +// default encryption configuration that uses server-side encryption with +// an Key Management Service (KMS) key (SSE-KMS), or a customer-provided +// encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a +// customer-provided key to encrypt the uploaded parts. When you perform +// a CreateMultipartUpload operation, if you want to use a different type +// of encryption setting for the uploaded parts, you can request that Amazon +// S3 encrypts the object with a different encryption key (such as an Amazon +// S3 managed key, a KMS key, or a customer-provided key). When the encryption +// setting in your request is different from the default encryption configuration +// of the destination bucket, the encryption setting in your request takes +// precedence. 
If you choose to provide your own encryption key, the request +// headers you provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// requests must match the headers you used in the CreateMultipartUpload +// request. Use KMS keys (SSE-KMS) that include the Amazon Web Services managed +// key (aws/s3) and KMS customer managed keys stored in Key Management Service +// (KMS) – If you want Amazon Web Services to manage the keys used to encrypt +// data, specify the following headers in the request. x-amz-server-side-encryption +// x-amz-server-side-encryption-aws-kms-key-id x-amz-server-side-encryption-context +// If you specify x-amz-server-side-encryption:aws:kms, but don't provide +// x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon +// Web Services managed key (aws/s3 key) in KMS to protect the data. To perform +// a multipart upload with encryption by using an Amazon Web Services KMS +// key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* +// actions on the key. These permissions are required because Amazon S3 must +// decrypt and read data from the encrypted file parts before it completes +// the multipart upload. For more information, see Multipart upload API and +// permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// and Protecting data using server-side encryption with Amazon Web Services +// KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// in the Amazon S3 User Guide. If your Identity and Access Management (IAM) +// user or role is in the same Amazon Web Services account as the KMS key, +// then you must have these permissions on the key policy. If your IAM user +// or role is in a different account from the key, then you must have the +// permissions on both the key policy and your IAM user or role. All GET +// and PUT requests for an object protected by KMS fail if you don't make +// them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), +// or Signature Version 4. For information about configuring any of the officially +// supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying +// the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) +// in the Amazon S3 User Guide. For more information about server-side encryption +// with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption +// with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// in the Amazon S3 User Guide. Use customer-provided encryption keys (SSE-C) +// – If you want to manage your own encryption keys, provide all the following +// headers in the request. x-amz-server-side-encryption-customer-algorithm +// x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 +// For more information about server-side encryption with customer-provided +// encryption keys (SSE-C), see Protecting data using server-side encryption +// with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. 
+// +// - Directory buckets - For directory buckets, only server-side encryption +// with Amazon S3 managed keys (SSE-S3) (AES256) is supported. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following operations are related to CreateMultipartUpload: +// +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateMultipartUpload for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + return out, req.Send() +} + +// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateSession = "CreateSession" + +// CreateSessionRequest generates an "aws/request.Request" representing the +// client's request for the CreateSession operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use the "Send" method on the returned Request to send the API call to the service. +// The "output" return value is not valid until after Send returns without error. +// +// See CreateSession for more information on using the CreateSession +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. +// +// // Example sending a request using the CreateSessionRequest method.
+// req, resp := client.CreateSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateSession +func (c *S3) CreateSessionRequest(input *CreateSessionInput) (req *request.Request, output *CreateSessionOutput) { + op := &request.Operation{ + Name: opCreateSession, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?session", + } + + if input == nil { + input = &CreateSessionInput{} + } + + output = &CreateSessionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSession API operation for Amazon Simple Storage Service. +// +// Creates a session that establishes temporary security credentials to support +// fast authentication and authorization for the Zonal endpoint APIs on directory +// buckets. For more information about Zonal endpoint APIs that include the +// Availability Zone in the request endpoint, see S3 Express One Zone APIs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html) +// in the Amazon S3 User Guide. +// +// To make Zonal endpoint API requests on a directory bucket, use the CreateSession +// API operation. Specifically, you grant s3express:CreateSession permission +// to a bucket in a bucket policy or an IAM identity-based policy. Then, you +// use IAM credentials to make the CreateSession API request on the bucket, +// which returns temporary security credentials that include the access key +// ID, secret access key, session token, and expiration. These credentials have +// associated permissions to access the Zonal endpoint APIs. After the session +// is created, you don’t need to use other policies to grant permissions to +// each Zonal endpoint API individually. Instead, in your Zonal endpoint API +// requests, you sign your requests by applying the temporary security credentials +// of the session to the request headers and following the SigV4 protocol for +// authentication. You also apply the session token to the x-amz-s3session-token +// request header for authorization. Temporary security credentials are scoped +// to the bucket and expire after 5 minutes. After the expiration time, any +// calls that you make with those credentials will fail. You must use IAM credentials +// again to make a CreateSession API request that generates a new set of temporary +// credentials for use. Temporary credentials cannot be extended or refreshed +// beyond the original specified interval. +// +// If you use Amazon Web Services SDKs, SDKs handle the session token refreshes +// automatically to avoid service interruptions when a session expires. We recommend +// that you use the Amazon Web Services SDKs to initiate and manage requests +// to the CreateSession API. For more information, see Performance guidelines +// and design patterns (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication) +// in the Amazon S3 User Guide. +// +// - You must make requests for this API operation to the Zonal endpoint. +// These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. +// Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. 
+// +// - CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject +// API operation doesn't use the temporary security credentials returned +// from the CreateSession API operation for authentication and authorization. +// For information about authentication and authorization of the CopyObject +// API operation on directory buckets, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html). +// +// - HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket +// API operation doesn't use the temporary security credentials returned +// from the CreateSession API operation for authentication and authorization. +// For information about authentication and authorization of the HeadBucket +// API operation on directory buckets, see HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html). +// +// # Permissions +// +// To obtain temporary security credentials, you must create a bucket policy +// or an IAM identity-based policy that grants s3express:CreateSession permission +// to the bucket. In a policy, you can have the s3express:SessionMode condition +// key to control who can create a ReadWrite or ReadOnly session. For more information +// about ReadWrite or ReadOnly sessions, see x-amz-create-session-mode (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters). +// For example policies, see Example bucket policies for S3 Express One Zone +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. +// +// To grant cross-account access to Zonal endpoint APIs, the bucket policy should +// also grant both accounts the s3express:CreateSession permission. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateSession for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateSession +func (c *S3) CreateSession(input *CreateSessionInput) (*CreateSessionOutput, error) { + req, out := c.CreateSessionRequest(input) + return out, req.Send() +} + +// CreateSessionWithContext is the same as CreateSession with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) CreateSessionWithContext(ctx aws.Context, input *CreateSessionInput, opts ...request.Option) (*CreateSessionOutput, error) { + req, out := c.CreateSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucket = "DeleteBucket" + +// DeleteBucketRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucket for more information on using the DeleteBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketRequest method. +// req, resp := client.DeleteBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { + op := &request.Operation{ + Name: opDeleteBucket, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &DeleteBucketInput{} + } + + output = &DeleteBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucket API operation for Amazon Simple Storage Service. +// +// Deletes the S3 bucket. All objects (including all object versions and delete +// markers) in the bucket must be deleted before the bucket itself can be deleted. +// +// - Directory buckets - If multipart uploads in a directory bucket are in +// progress, you can't delete the bucket until all the in-progress multipart +// uploads are aborted or completed. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, +// see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - You must have the s3:DeleteBucket +// permission on the specified bucket in a policy. +// +// - Directory bucket permissions - You must have the s3express:DeleteBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation +// can only be performed by the Amazon Web Services account that owns the +// resource. For more information about directory bucket policies and permissions, +// see Amazon Web Services Identity and Access Management (IAM) for S3 Express +// One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. 
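+//
+// A minimal sketch (editor's illustration, not part of the generated SDK
+// documentation) of deleting an already emptied bucket with a request
+// timeout. The bucket name is a placeholder assumption, the context and
+// time packages are assumed to be imported, and client is assumed to be
+// an *s3.S3.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//
+//	// The context cancels the request if the deadline passes first.
+//	_, err := client.DeleteBucketWithContext(ctx, &s3.DeleteBucketInput{
+//	    Bucket: aws.String("amzn-s3-demo-bucket"), // must already be empty
+//	})
+//	// ... handle err ...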
+// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. +// +// The following operations are related to DeleteBucket: +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucket for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + return out, req.Send() +} + +// DeleteBucketWithContext is the same as DeleteBucket with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration" + +// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketAnalyticsConfiguration for more information on using the DeleteBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method. 
+// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketAnalyticsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &DeleteBucketAnalyticsConfigurationInput{} + } + + output = &DeleteBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Deletes an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// The following operations are related to DeleteBucketAnalyticsConfiguration: +// +// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketCors = "DeleteBucketCors" + +// DeleteBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketCors for more information on using the DeleteBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketCorsRequest method. +// req, resp := client.DeleteBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { + op := &request.Operation{ + Name: opDeleteBucketCors, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &DeleteBucketCorsInput{} + } + + output = &DeleteBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketCors API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Deletes the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketCORS +// action. The bucket owner has this permission by default and can grant this +// permission to others. +// +// For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) +// in the Amazon S3 User Guide. +// +// Related Resources +// +// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// +// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketCors for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + return out, req.Send() +} + +// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketEncryption = "DeleteBucketEncryption" + +// DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketEncryption for more information on using the DeleteBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketEncryptionRequest method. +// req, resp := client.DeleteBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (req *request.Request, output *DeleteBucketEncryptionOutput) { + op := &request.Operation{ + Name: opDeleteBucketEncryption, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &DeleteBucketEncryptionInput{} + } + + output = &DeleteBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketEncryption API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// This implementation of the DELETE action resets the default encryption for +// the bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). +// For information about the bucket default encryption feature, see Amazon S3 +// Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon S3 User Guide. +// +// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
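+//
+// A minimal calling sketch (client and bucket name are placeholders):
+//
+//	_, err := client.DeleteBucketEncryption(&DeleteBucketEncryptionInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//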
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// The following operations are related to DeleteBucketEncryption: +// +// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// +// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryption(input *DeleteBucketEncryptionInput) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + return out, req.Send() +} + +// DeleteBucketEncryptionWithContext is the same as DeleteBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBucketEncryptionInput, opts ...request.Option) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketIntelligentTieringConfiguration = "DeleteBucketIntelligentTieringConfiguration" + +// DeleteBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketIntelligentTieringConfiguration for more information on using the DeleteBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketIntelligentTieringConfigurationRequest method. 
+// req, resp := client.DeleteBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration +func (c *S3) DeleteBucketIntelligentTieringConfigurationRequest(input *DeleteBucketIntelligentTieringConfigurationInput) (req *request.Request, output *DeleteBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketIntelligentTieringConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &DeleteBucketIntelligentTieringConfigurationInput{} + } + + output = &DeleteBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput +// access tiers. To get the lowest storage cost on data that can be accessed +// in minutes to hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not monitored and not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the Frequent Access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to DeleteBucketIntelligentTieringConfiguration include: +// +// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketIntelligentTieringConfiguration for usage and error information. 
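+//
+// A minimal calling sketch (client, bucket, and configuration ID are
+// placeholders):
+//
+//	_, err := client.DeleteBucketIntelligentTieringConfiguration(
+//		&DeleteBucketIntelligentTieringConfigurationInput{
+//			Bucket: aws.String("example-bucket"),
+//			Id:     aws.String("example-config-id"),
+//		})
+//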
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration +func (c *S3) DeleteBucketIntelligentTieringConfiguration(input *DeleteBucketIntelligentTieringConfigurationInput) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketIntelligentTieringConfigurationWithContext is the same as DeleteBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *DeleteBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" + +// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketInventoryConfiguration for more information on using the DeleteBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method. +// req, resp := client.DeleteBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketInventoryConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &DeleteBucketInventoryConfigurationInput{} + } + + output = &DeleteBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Deletes an inventory configuration (identified by the inventory ID) from +// the bucket. 
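+//
+// For example (client, bucket, and inventory configuration ID are
+// placeholders):
+//
+//	_, err := client.DeleteBucketInventoryConfiguration(
+//		&DeleteBucketInventoryConfigurationInput{
+//			Bucket: aws.String("example-bucket"),
+//			Id:     aws.String("example-inventory-id"),
+//		})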
+// +// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// +// Operations related to DeleteBucketInventoryConfiguration include: +// +// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketLifecycle = "DeleteBucketLifecycle" + +// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle +// API call, and error handling. 
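+//
+// For example, the returned Request can be customized before it is sent
+// (client and bucket are placeholders; ctx is any non-nil context):
+//
+//	req, out := client.DeleteBucketLifecycleRequest(&DeleteBucketLifecycleInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	req.SetContext(ctx)
+//	if err := req.Send(); err == nil {
+//		fmt.Println(out)
+//	}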
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketLifecycleRequest method. +// req, resp := client.DeleteBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { + op := &request.Operation{ + Name: opDeleteBucketLifecycle, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + output = &DeleteBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketLifecycle API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Deletes the lifecycle configuration from the specified bucket. Amazon S3 +// removes all the lifecycle configuration rules in the lifecycle subresource +// associated with the bucket. Your objects never expire, and Amazon S3 no longer +// automatically deletes any objects on the basis of rules contained in the +// deleted lifecycle configuration. +// +// To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration +// action. By default, the bucket owner has this permission and the bucket owner +// can grant this permission to others. +// +// There is usually some time lag before lifecycle configuration deletion is +// fully propagated to all the Amazon S3 systems. +// +// For more information about the object expiration, see Elements to Describe +// Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). +// +// Related actions include: +// +// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketLifecycle for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + return out, req.Send() +} + +// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
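+//
+// A typical cancellation sketch (client and bucket are placeholders; the
+// caller is assumed to import "context" and "time"):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	_, err := client.DeleteBucketLifecycleWithContext(ctx,
+//		&DeleteBucketLifecycleInput{Bucket: aws.String("example-bucket")})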
+func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" + +// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketMetricsConfiguration for more information on using the DeleteBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method. +// req, resp := client.DeleteBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketMetricsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &DeleteBucketMetricsConfigurationInput{} + } + + output = &DeleteBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Deletes a metrics configuration for the Amazon CloudWatch request metrics +// (specified by the metrics configuration ID) from the bucket. Note that this +// doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). 
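+//
+// For example (client, bucket, and metrics configuration ID are placeholders):
+//
+//	_, err := client.DeleteBucketMetricsConfiguration(&DeleteBucketMetricsConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//		Id:     aws.String("example-metrics-id"),
+//	})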
+// +// The following operations are related to DeleteBucketMetricsConfiguration: +// +// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// +// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketOwnershipControls = "DeleteBucketOwnershipControls" + +// DeleteBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketOwnershipControls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketOwnershipControls for more information on using the DeleteBucketOwnershipControls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketOwnershipControlsRequest method. 
+// req, resp := client.DeleteBucketOwnershipControlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls +func (c *S3) DeleteBucketOwnershipControlsRequest(input *DeleteBucketOwnershipControlsInput) (req *request.Request, output *DeleteBucketOwnershipControlsOutput) { + op := &request.Operation{ + Name: opDeleteBucketOwnershipControls, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?ownershipControls", + } + + if input == nil { + input = &DeleteBucketOwnershipControlsInput{} + } + + output = &DeleteBucketOwnershipControlsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketOwnershipControls API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, +// you must have the s3:PutBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). +// +// The following operations are related to DeleteBucketOwnershipControls: +// +// - GetBucketOwnershipControls +// +// - PutBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketOwnershipControls for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls +func (c *S3) DeleteBucketOwnershipControls(input *DeleteBucketOwnershipControlsInput) (*DeleteBucketOwnershipControlsOutput, error) { + req, out := c.DeleteBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// DeleteBucketOwnershipControlsWithContext is the same as DeleteBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketOwnershipControlsWithContext(ctx aws.Context, input *DeleteBucketOwnershipControlsInput, opts ...request.Option) (*DeleteBucketOwnershipControlsOutput, error) { + req, out := c.DeleteBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketPolicy = "DeleteBucketPolicy" + +// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketPolicy for more information on using the DeleteBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketPolicyRequest method. +// req, resp := client.DeleteBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { + op := &request.Operation{ + Name: opDeleteBucketPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &DeleteBucketPolicyInput{} + } + + output = &DeleteBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketPolicy API operation for Amazon Simple Storage Service. +// +// Deletes the policy of a specified bucket. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// If you are using an identity other than the root user of the Amazon Web Services +// account that owns the bucket, the calling identity must both have the DeleteBucketPolicy +// permissions on the specified bucket and belong to the bucket owner's account +// in order to use this operation. +// +// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 +// Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. +// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy +// API actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing +// these API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// +// - General purpose bucket permissions - The s3:DeleteBucketPolicy permission +// is required in a policy. For more information about general purpose buckets +// bucket policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation, +// you must have the s3express:DeleteBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. 
Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web +// Services account that owns the resource. For more information about directory +// bucket policies and permissions, see Amazon Web Services Identity and +// Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. +// +// The following operations are related to DeleteBucketPolicy +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + return out, req.Send() +} + +// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketReplication = "DeleteBucketReplication" + +// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketReplication for more information on using the DeleteBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketReplicationRequest method. 
+// req, resp := client.DeleteBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { + op := &request.Operation{ + Name: opDeleteBucketReplication, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &DeleteBucketReplicationInput{} + } + + output = &DeleteBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketReplication API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Deletes the replication configuration from the bucket. +// +// To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration +// action. The bucket owner has these permissions by default and can grant it +// to others. For more information about permissions, see Permissions Related +// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// It can take a while for the deletion of a replication configuration to fully +// propagate. +// +// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon S3 User Guide. +// +// The following operations are related to DeleteBucketReplication: +// +// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) +// +// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketReplication for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + return out, req.Send() +} + +// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
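+//
+// Request options supplied here are applied to the underlying request. As an
+// illustrative sketch (client and bucket are placeholders; the request
+// package is github.com/aws/aws-sdk-go/aws/request):
+//
+//	_, err := client.DeleteBucketReplicationWithContext(aws.BackgroundContext(),
+//		&DeleteBucketReplicationInput{Bucket: aws.String("example-bucket")},
+//		request.WithLogLevel(aws.LogDebugWithHTTPBody))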
+func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketTagging = "DeleteBucketTagging" + +// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketTagging for more information on using the DeleteBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketTaggingRequest method. +// req, resp := client.DeleteBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { + op := &request.Operation{ + Name: opDeleteBucketTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &DeleteBucketTaggingInput{} + } + + output = &DeleteBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketTagging API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Deletes the tags from the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// The following operations are related to DeleteBucketTagging: +// +// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// +// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + return out, req.Send() +} + +// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketWebsite = "DeleteBucketWebsite" + +// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketWebsiteRequest method. +// req, resp := client.DeleteBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { + op := &request.Operation{ + Name: opDeleteBucketWebsite, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &DeleteBucketWebsiteInput{} + } + + output = &DeleteBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketWebsite API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// This action removes the website configuration for a bucket. Amazon S3 returns +// a 200 OK response upon successfully deleting a website configuration on the +// specified bucket. You will get a 200 OK response if the website configuration +// you are trying to delete does not exist on the bucket. Amazon S3 returns +// a 404 response if the bucket specified in the request does not exist. +// +// This DELETE action requires the S3:DeleteBucketWebsite permission. By default, +// only the bucket owner can delete the website configuration attached to a +// bucket. However, bucket owners can grant other users permission to delete +// the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite +// permission. +// +// For more information about hosting websites, see Hosting Websites on Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// The following operations are related to DeleteBucketWebsite: +// +// - GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) +// +// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketWebsite for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + return out, req.Send() +} + +// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObject = "DeleteObject" + +// DeleteObjectRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteObject for more information on using the DeleteObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteObjectRequest method. +// req, resp := client.DeleteObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { + op := &request.Operation{ + Name: opDeleteObject, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &DeleteObjectInput{} + } + + output = &DeleteObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObject API operation for Amazon Simple Storage Service. +// +// Removes an object from a bucket. The behavior depends on the bucket's versioning +// state: +// +// - If bucket versioning is not enabled, the operation permanently deletes +// the object. +// +// - If bucket versioning is enabled, the operation inserts a delete marker, +// which becomes the current version of the object. To permanently delete +// an object in a versioned bucket, you must include the object’s versionId +// in the request. For more information about versioning-enabled buckets, +// see Deleting object versions from a versioning-enabled bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html). 
+// +// - If bucket versioning is suspended, the operation removes the object +// that has a null versionId, if there is one, and inserts a delete marker +// that becomes the current version of the object. If there isn't an object +// with a null versionId, and all versions of the object have a versionId, +// Amazon S3 does not remove the object and only inserts a delete marker. +// To permanently delete an object that has a versionId, you must include +// the object’s versionId in the request. For more information about versioning-suspended +// buckets, see Deleting objects from versioning-suspended buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html). +// +// - Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. For this API operation, only the null value of the version ID +// is supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// To remove a specific version, you must use the versionId query parameter. +// Using this query parameter permanently deletes the version. If the object +// deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker +// to true. +// +// If the object you want to delete is in a bucket where the bucket versioning +// configuration is MFA Delete enabled, you must include the x-amz-mfa request +// header in the DELETE versionId request. Requests that include x-amz-mfa must +// use HTTPS. For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html) +// in the Amazon S3 User Guide. To see sample requests that use versioning, +// see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). +// +// Directory buckets - MFA delete is not supported by directory buckets. +// +// You can delete objects by explicitly calling DELETE Object or calling (PutBucketLifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) +// to enable Amazon S3 to remove them for you. If you want to block users or +// accounts from removing or deleting objects from your bucket, you must deny +// them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration +// actions. +// +// Directory buckets - S3 Lifecycle is not supported by directory buckets. +// +// Permissions +// +// - General purpose bucket permissions - The following permissions are required +// in your policies when your DeleteObjects request includes specific headers. +// s3:DeleteObject - To delete an object from a bucket, you must always have +// the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific +// version of an object from a versioning-enabled bucket, you must have the +// s3:DeleteObjectVersion permission. 
+// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following action is related to DeleteObject: +// +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + return out, req.Send() +} + +// DeleteObjectWithContext is the same as DeleteObject with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObjectTagging = "DeleteObjectTagging" + +// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteObjectTagging for more information on using the DeleteObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteObjectTaggingRequest method. 
+// req, resp := client.DeleteObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) {
+	op := &request.Operation{
+		Name:       opDeleteObjectTagging,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/{Bucket}/{Key+}?tagging",
+	}
+
+	if input == nil {
+		input = &DeleteObjectTaggingInput{}
+	}
+
+	output = &DeleteObjectTaggingOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DeleteObjectTagging API operation for Amazon Simple Storage Service.
+//
+// This operation is not supported by directory buckets.
+//
+// Removes the entire tag set from the specified object. For more information
+// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+//
+// To use this operation, you must have permission to perform the s3:DeleteObjectTagging
+// action.
+//
+// To delete tags of a specific object version, add the versionId query parameter
+// in the request. You will need permission for the s3:DeleteObjectVersionTagging
+// action.
+//
+// The following operations are related to DeleteObjectTagging:
+//
+// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
+//
+// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObjectTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) {
+	req, out := c.DeleteObjectTaggingRequest(input)
+	return out, req.Send()
+}
+
+// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) {
+	req, out := c.DeleteObjectTaggingRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDeleteObjects = "DeleteObjects"
+
+// DeleteObjectsRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteObjects operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteObjects for more information on using the DeleteObjects
+// API call, and error handling.
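+//
+// As a sketch of that two-step pattern (not part of the generated source;
+// the header name is hypothetical, and client/params are as in the generated
+// example below), the returned Request can be customized before it is sent:
+//
+// req, out := client.DeleteObjectsRequest(params)
+// req.HTTPRequest.Header.Set("X-Example-Audit-Id", "abc-123") // hypothetical header
+// err := req.Send()
+// if err == nil {
+// fmt.Println(out)
+// }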
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the DeleteObjectsRequest method.
+// req, resp := client.DeleteObjectsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
+func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) {
+	op := &request.Operation{
+		Name:       opDeleteObjects,
+		HTTPMethod: "POST",
+		HTTPPath:   "/{Bucket}?delete",
+	}
+
+	if input == nil {
+		input = &DeleteObjectsInput{}
+	}
+
+	output = &DeleteObjectsOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Build.PushBackNamed(request.NamedHandler{
+		Name: "contentMd5Handler",
+		Fn:   checksum.AddBodyContentMD5Handler,
+	})
+	return
+}
+
+// DeleteObjects API operation for Amazon Simple Storage Service.
+//
+// This operation enables you to delete multiple objects from a bucket using
+// a single HTTP request. If you know the object keys that you want to delete,
+// then this operation provides a suitable alternative to sending individual
+// delete requests, reducing per-request overhead.
+//
+// The request can contain a list of up to 1000 keys that you want to delete.
+// In the XML, you provide the object key names, and optionally, version IDs
+// if you want to delete a specific version of the object from a versioning-enabled
+// bucket. For each key, Amazon S3 performs a delete operation and returns the
+// result of that delete, success or failure, in the response. Note that if
+// the object specified in the request is not found, Amazon S3 returns the result
+// as deleted.
+//
+// - Directory buckets - S3 Versioning isn't enabled or supported for directory
+// buckets.
+//
+// - Directory buckets - For directory buckets, you must make requests for
+// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name.
+// Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// The operation supports two modes for the response: verbose and quiet. By
+// default, the operation uses verbose mode in which the response includes the
+// result of deletion of each key in your request. In quiet mode the response
+// includes only keys where the delete operation encountered an error. For a
+// successful deletion in quiet mode, the operation does not return any information
+// about the delete in the response body.
+//
+// When you perform this action on an MFA Delete enabled bucket and attempt
+// to delete any versioned objects, you must include an MFA token. If you do
+// not provide one, the entire request will fail, even if there are non-versioned
+// objects you are trying to delete. If you provide an invalid token, whether
+// there are versioned keys in the request or not, the entire Multi-Object Delete
+// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete)
+// in the Amazon S3 User Guide.
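+//
+// A minimal quiet-mode batch delete might look as follows (a sketch, not
+// part of the generated source; the bucket and key names are hypothetical):
+//
+// svc := s3.New(session.Must(session.NewSession()))
+// out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
+//     Bucket: aws.String("example-bucket"),
+//     Delete: &s3.Delete{
+//         Objects: []*s3.ObjectIdentifier{
+//             {Key: aws.String("logs/2024-10-01.log")},
+//             {Key: aws.String("logs/2024-10-02.log")},
+//         },
+//         Quiet: aws.Bool(true), // report only keys whose delete failed
+//     },
+// })
+// if err == nil {
+//     fmt.Println(out.Errors) // empty when every delete succeeded
+// }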
+//
+// Directory buckets - MFA delete is not supported by directory buckets.
+//
+// Permissions
+//
+// - General purpose bucket permissions - The following permissions are required
+// in your policies when your DeleteObjects request includes specific headers.
+// s3:DeleteObject - To delete an object from a bucket, you must always specify
+// the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific
+// version of an object from a versioning-enabled bucket, you must specify
+// the s3:DeleteObjectVersion permission.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// Content-MD5 request header
+//
+// - General purpose bucket - The Content-MD5 request header is required
+// for all Multi-Object Delete requests. Amazon S3 uses the header value
+// to ensure that your request body has not been altered in transit.
+//
+// - Directory bucket - The Content-MD5 request header or an additional checksum
+// request header (including x-amz-checksum-crc32, x-amz-checksum-crc32c,
+// x-amz-checksum-sha1, or x-amz-checksum-sha256) is required for all Multi-Object
+// Delete requests.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
+//
+// The following operations are related to DeleteObjects:
+//
+// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
+//
+// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+//
+// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
+//
+// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
+//
+// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObjects for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
+func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) {
+	req, out := c.DeleteObjectsRequest(input)
+	return out, req.Send()
+}
+
+// DeleteObjectsWithContext is the same as DeleteObjects with the addition of
+// the ability to pass a context and additional request options.
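+//
+// For example, a deadline can be applied to the call (a sketch; svc and input
+// are hypothetical, and the timeout value is arbitrary):
+//
+// ctx, cancel := context.WithTimeout(aws.BackgroundContext(), 15*time.Second)
+// defer cancel()
+// _, err := svc.DeleteObjectsWithContext(ctx, input)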
+// +// See DeleteObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeletePublicAccessBlock = "DeletePublicAccessBlock" + +// DeletePublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the DeletePublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeletePublicAccessBlock for more information on using the DeletePublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeletePublicAccessBlockRequest method. +// req, resp := client.DeletePublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) (req *request.Request, output *DeletePublicAccessBlockOutput) { + op := &request.Operation{ + Name: opDeletePublicAccessBlock, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &DeletePublicAccessBlockInput{} + } + + output = &DeletePublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePublicAccessBlock API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:PutBucketPublicAccessBlock permission. +// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). 
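+//
+// A minimal sketch of the call (not part of the generated source; the bucket
+// name is hypothetical):
+//
+// _, err := svc.DeletePublicAccessBlock(&s3.DeletePublicAccessBlockInput{
+//     Bucket: aws.String("example-bucket"),
+// })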
+// +// The following operations are related to DeletePublicAccessBlock: +// +// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeletePublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlock(input *DeletePublicAccessBlockInput) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + return out, req.Send() +} + +// DeletePublicAccessBlockWithContext is the same as DeletePublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeletePublicAccessBlockWithContext(ctx aws.Context, input *DeletePublicAccessBlockInput, opts ...request.Option) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" + +// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAccelerateConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAccelerateConfiguration for more information on using the GetBucketAccelerateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. 
+// req, resp := client.GetBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAccelerateConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &GetBucketAccelerateConfigurationInput{} + } + + output = &GetBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// This implementation of the GET action uses the accelerate subresource to +// return the Transfer Acceleration state of a bucket, which is either Enabled +// or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that +// enables you to perform faster data transfers to and from Amazon S3. +// +// To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// You set the Transfer Acceleration state of an existing bucket to Enabled +// or Suspended by using the PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// operation. +// +// A GET accelerate request does not return a state value for a bucket that +// has no transfer acceleration state. A bucket has no Transfer Acceleration +// state if a state has never been set on the bucket. +// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon S3 User Guide. +// +// The following operations are related to GetBucketAccelerateConfiguration: +// +// - PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAccelerateConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAcl = "GetBucketAcl" + +// GetBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAcl for more information on using the GetBucketAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketAclRequest method. +// req, resp := client.GetBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { + op := &request.Operation{ + Name: opGetBucketAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &GetBucketAclInput{} + } + + output = &GetBucketAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAcl API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// This implementation of the GET action uses the acl subresource to return +// the access control list (ACL) of a bucket. To use GET to return the ACL of +// the bucket, you must have the READ_ACP access to the bucket. If READ_ACP +// permission is granted to the anonymous user, you can return the ACL of the +// bucket without using an authorization header. +// +// When you use this API operation with an access point, provide the alias of +// the access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide +// the alias of the Object Lambda access point in place of the bucket name. +// If the Object Lambda access point alias in a request is not valid, the error +// code InvalidAccessPointAliasError is returned. 
For more information about +// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// requests to read ACLs are still supported and return the bucket-owner-full-control +// ACL with the owner being the account that created the bucket. For more information, +// see Controlling object ownership and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// The following operations are related to GetBucketAcl: +// +// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAcl for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + return out, req.Send() +} + +// GetBucketAclWithContext is the same as GetBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" + +// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAnalyticsConfiguration for more information on using the GetBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method. 
+// req, resp := client.GetBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAnalyticsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &GetBucketAnalyticsConfigurationInput{} + } + + output = &GetBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// This implementation of the GET action returns an analytics configuration +// (identified by the analytics configuration ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// in the Amazon S3 User Guide. +// +// The following operations are related to GetBucketAnalyticsConfiguration: +// +// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketCors = "GetBucketCors" + +// GetBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketCors for more information on using the GetBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketCorsRequest method. +// req, resp := client.GetBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { + op := &request.Operation{ + Name: opGetBucketCors, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &GetBucketCorsInput{} + } + + output = &GetBucketCorsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketCors API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the Cross-Origin Resource Sharing (CORS) configuration information +// set for the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// When you use this API operation with an access point, provide the alias of +// the access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide +// the alias of the Object Lambda access point in place of the bucket name. +// If the Object Lambda access point alias in a request is not valid, the error +// code InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). +// +// For more information about CORS, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). +// +// The following operations are related to GetBucketCors: +// +// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// +// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
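+//
+// For instance (a sketch; the bucket name is hypothetical), the returned error
+// can be inspected with a type assertion:
+//
+// out, err := svc.GetBucketCors(&s3.GetBucketCorsInput{Bucket: aws.String("example-bucket")})
+// if err != nil {
+//     if aerr, ok := err.(awserr.Error); ok {
+//         fmt.Println(aerr.Code(), aerr.Message())
+//     }
+// } else {
+//     fmt.Println(out.CORSRules)
+// }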
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + return out, req.Send() +} + +// GetBucketCorsWithContext is the same as GetBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketEncryption = "GetBucketEncryption" + +// GetBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketEncryption for more information on using the GetBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketEncryptionRequest method. +// req, resp := client.GetBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *request.Request, output *GetBucketEncryptionOutput) { + op := &request.Operation{ + Name: opGetBucketEncryption, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &GetBucketEncryptionInput{} + } + + output = &GetBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketEncryption API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the default encryption configuration for an Amazon S3 bucket. By +// default, all buckets have a default encryption configuration that uses server-side +// encryption with Amazon S3 managed keys (SSE-S3). For information about the +// bucket default encryption feature, see Amazon S3 Bucket Default Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon S3 User Guide. +// +// To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// The following operations are related to GetBucketEncryption: +// +// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// +// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryption(input *GetBucketEncryptionInput) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + return out, req.Send() +} + +// GetBucketEncryptionWithContext is the same as GetBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEncryptionInput, opts ...request.Option) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketIntelligentTieringConfiguration = "GetBucketIntelligentTieringConfiguration" + +// GetBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketIntelligentTieringConfiguration for more information on using the GetBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketIntelligentTieringConfigurationRequest method. 
+// req, resp := client.GetBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration +func (c *S3) GetBucketIntelligentTieringConfigurationRequest(input *GetBucketIntelligentTieringConfigurationInput) (req *request.Request, output *GetBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketIntelligentTieringConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &GetBucketIntelligentTieringConfigurationInput{} + } + + output = &GetBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Gets the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput +// access tiers. To get the lowest storage cost on data that can be accessed +// in minutes to hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not monitored and not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the Frequent Access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to GetBucketIntelligentTieringConfiguration include: +// +// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketIntelligentTieringConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration +func (c *S3) GetBucketIntelligentTieringConfiguration(input *GetBucketIntelligentTieringConfigurationInput) (*GetBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketIntelligentTieringConfigurationWithContext is the same as GetBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *GetBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*GetBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" + +// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketInventoryConfiguration for more information on using the GetBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketInventoryConfigurationRequest method. +// req, resp := client.GetBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketInventoryConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &GetBucketInventoryConfigurationInput{} + } + + output = &GetBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns an inventory configuration (identified by the inventory configuration +// ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. 
For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// +// The following operations are related to GetBucketInventoryConfiguration: +// +// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycle = "GetBucketLifecycle" + +// GetBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLifecycle for more information on using the GetBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketLifecycleRequest method. 
+// req, resp := client.GetBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated +func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketLifecycle, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleInput{} + } + + output = &GetBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycle API operation for Amazon Simple Storage Service. +// +// For an updated version of this API, see GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html). +// If you configured a bucket lifecycle using the filter element, you should +// see the updated version of this topic. This topic is provided for backward +// compatibility. +// +// This operation is not supported by directory buckets. +// +// Returns the lifecycle configuration information set on the bucket. For information +// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). +// +// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// GetBucketLifecycle has the following special error: +// +// - Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client +// +// The following operations are related to GetBucketLifecycle: +// +// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// - PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// +// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycle for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated +func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: GetBucketLifecycleWithContext has been deprecated +func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" + +// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycleConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. +// req, resp := client.GetBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycleConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleConfigurationInput{} + } + + output = &GetBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, object size, or any combination +// of these. Accordingly, this section describes the latest API. The previous +// version of the API supported filtering based only on an object key name prefix, +// which is supported for backward compatibility. For the related API description, +// see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). 
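+//
+// A minimal sketch of retrieving the configuration (not part of the generated
+// source; the bucket name is hypothetical):
+//
+// out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
+//     Bucket: aws.String("example-bucket"),
+// })
+// if err == nil {
+//     for _, rule := range out.Rules {
+//         fmt.Println(aws.StringValue(rule.ID), aws.StringValue(rule.Status))
+//     }
+// }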
+// The response describes the new filter element that you can use to specify
+// a filter to select a subset of objects to which the rule applies. If you
+// are using a previous version of the lifecycle configuration, it still works;
+// for the earlier action, see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html).
+//
+// Returns the lifecycle configuration information set on the bucket. For information
+// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
+//
+// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+//
+// GetBucketLifecycleConfiguration has the following special error:
+//
+// - Error code: NoSuchLifecycleConfiguration Description: The lifecycle
+// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault
+// Code Prefix: Client
+//
+// The following operations are related to GetBucketLifecycleConfiguration:
+//
+// - GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)
+//
+// - PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)
+//
+// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLifecycleConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration
+func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) {
+	req, out := c.GetBucketLifecycleConfigurationRequest(input)
+	return out, req.Send()
+}
+
+// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLifecycleConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) {
+	req, out := c.GetBucketLifecycleConfigurationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+ return out, req.Send() +} + +const opGetBucketLocation = "GetBucketLocation" + +// GetBucketLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLocation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLocation for more information on using the GetBucketLocation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketLocationRequest method. +// req, resp := client.GetBucketLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { + op := &request.Operation{ + Name: opGetBucketLocation, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?location", + } + + if input == nil { + input = &GetBucketLocationInput{} + } + + output = &GetBucketLocationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLocation API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the Region the bucket resides in. You set the bucket's Region using +// the LocationConstraint request parameter in a CreateBucket request. For more +// information, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). +// +// When you use this API operation with an access point, provide the alias of +// the access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide +// the alias of the Object Lambda access point in place of the bucket name. +// If the Object Lambda access point alias in a request is not valid, the error +// code InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). +// +// We recommend that you use HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html) +// to return the Region that a bucket resides in. For backward compatibility, +// Amazon S3 continues to support GetBucketLocation. +// +// The following operations are related to GetBucketLocation: +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLocation for usage and error information. 
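+//
+// A minimal usage sketch for the one-shot wrapper, assuming an initialized
+// *s3.S3 client named client and the usual aws and fmt imports; the bucket
+// name "examplebucket" is a placeholder:
+//
+//	result, err := client.GetBucketLocation(&s3.GetBucketLocationInput{
+//		Bucket: aws.String("examplebucket"),
+//	})
+//	if err == nil {
+//		// An empty LocationConstraint denotes the us-east-1 Region.
+//		fmt.Println(aws.StringValue(result.LocationConstraint))
+//	}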
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + return out, req.Send() +} + +// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLocation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLogging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLogging for more information on using the GetBucketLogging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketLoggingRequest method. +// req, resp := client.GetBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { + op := &request.Operation{ + Name: opGetBucketLogging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + output = &GetBucketLoggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLogging API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the logging status of a bucket and the permissions users have to +// view and modify that status. +// +// The following operations are related to GetBucketLogging: +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// - PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLogging for usage and error information. 
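+//
+// A short sketch of reading the logging status, assuming an initialized
+// *s3.S3 client; "examplebucket" is a placeholder:
+//
+//	out, err := client.GetBucketLogging(&s3.GetBucketLoggingInput{
+//		Bucket: aws.String("examplebucket"),
+//	})
+//	// LoggingEnabled is nil when server access logging is disabled.
+//	if err == nil && out.LoggingEnabled != nil {
+//		fmt.Println(aws.StringValue(out.LoggingEnabled.TargetBucket))
+//	}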
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + return out, req.Send() +} + +// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" + +// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketMetricsConfiguration for more information on using the GetBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketMetricsConfigurationRequest method. +// req, resp := client.GetBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketMetricsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &GetBucketMetricsConfigurationInput{} + } + + output = &GetBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Gets a metrics configuration (specified by the metrics configuration ID) +// from the bucket. Note that this doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to GetBucketMetricsConfiguration: +// +// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// +// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotification = "GetBucketNotification" + +// GetBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketNotification for more information on using the GetBucketNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// // Example sending a request using the GetBucketNotificationRequest method. +// req, resp := client.GetBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated +func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketNotification, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfigurationDeprecated{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotification API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// No longer used, see GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotification for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated +func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: GetBucketNotificationWithContext has been deprecated +func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" + +// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotificationConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetBucketNotificationConfiguration for more information on using the GetBucketNotificationConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketNotificationConfigurationRequest method. +// req, resp := client.GetBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { + op := &request.Operation{ + Name: opGetBucketNotificationConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfiguration{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the notification configuration of a bucket. +// +// If notifications are not enabled on the bucket, the action returns an empty +// NotificationConfiguration element. +// +// By default, you must be the bucket owner to read the notification configuration +// of a bucket. However, the bucket owner can use a bucket policy to grant permission +// to other users to read this configuration with the s3:GetBucketNotification +// permission. +// +// When you use this API operation with an access point, provide the alias of +// the access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide +// the alias of the Object Lambda access point in place of the bucket name. +// If the Object Lambda access point alias in a request is not valid, the error +// code InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). +// +// For more information about setting and reading the notification configuration +// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following action is related to GetBucketNotification: +// +// - PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotificationConfiguration for usage and error information. 
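+//
+// A minimal sketch, assuming an initialized *s3.S3 client; note that the
+// input type is named GetBucketNotificationConfigurationRequest, and
+// "examplebucket" is a placeholder:
+//
+//	cfg, err := client.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{
+//		Bucket: aws.String("examplebucket"),
+//	})
+//	if err == nil {
+//		// Each slice is empty when no notifications of that kind are configured.
+//		fmt.Println(len(cfg.TopicConfigurations), len(cfg.QueueConfigurations))
+//	}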
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketOwnershipControls = "GetBucketOwnershipControls" + +// GetBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketOwnershipControls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketOwnershipControls for more information on using the GetBucketOwnershipControls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketOwnershipControlsRequest method. +// req, resp := client.GetBucketOwnershipControlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls +func (c *S3) GetBucketOwnershipControlsRequest(input *GetBucketOwnershipControlsInput) (req *request.Request, output *GetBucketOwnershipControlsOutput) { + op := &request.Operation{ + Name: opGetBucketOwnershipControls, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?ownershipControls", + } + + if input == nil { + input = &GetBucketOwnershipControlsInput{} + } + + output = &GetBucketOwnershipControlsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketOwnershipControls API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, +// you must have the s3:GetBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html). +// +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html). 
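+//
+// An illustrative sketch of reading the ownership setting, assuming an
+// initialized *s3.S3 client and a placeholder bucket name:
+//
+//	out, err := client.GetBucketOwnershipControls(&s3.GetBucketOwnershipControlsInput{
+//		Bucket: aws.String("examplebucket"),
+//	})
+//	if err == nil && out.OwnershipControls != nil {
+//		for _, rule := range out.OwnershipControls.Rules {
+//			// e.g. "BucketOwnerEnforced", "BucketOwnerPreferred", or "ObjectWriter"
+//			fmt.Println(aws.StringValue(rule.ObjectOwnership))
+//		}
+//	}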
+// +// The following operations are related to GetBucketOwnershipControls: +// +// - PutBucketOwnershipControls +// +// - DeleteBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketOwnershipControls for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls +func (c *S3) GetBucketOwnershipControls(input *GetBucketOwnershipControlsInput) (*GetBucketOwnershipControlsOutput, error) { + req, out := c.GetBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// GetBucketOwnershipControlsWithContext is the same as GetBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketOwnershipControlsWithContext(ctx aws.Context, input *GetBucketOwnershipControlsInput, opts ...request.Option) (*GetBucketOwnershipControlsOutput, error) { + req, out := c.GetBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketPolicy = "GetBucketPolicy" + +// GetBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketPolicy for more information on using the GetBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketPolicyRequest method. +// req, resp := client.GetBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { + op := &request.Operation{ + Name: opGetBucketPolicy, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &GetBucketPolicyInput{} + } + + output = &GetBucketPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicy API operation for Amazon Simple Storage Service. +// +// Returns the policy of a specified bucket. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. 
For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// If you are using an identity other than the root user of the Amazon Web Services +// account that owns the bucket, the calling identity must both have the GetBucketPolicy +// permissions on the specified bucket and belong to the bucket owner's account +// in order to use this operation. +// +// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy +// API actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing +// these API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// +// - General purpose bucket permissions - The s3:GetBucketPolicy permission +// is required in a policy. For more information about general purpose buckets +// bucket policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation, +// you must have the s3express:GetBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web +// Services account that owns the resource. For more information about directory +// bucket policies and permissions, see Amazon Web Services Identity and +// Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// # Example bucket policies +// +// General purpose buckets example bucket policies - See Bucket policy examples +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) +// in the Amazon S3 User Guide. +// +// Directory bucket example bucket policies - See Example bucket policies for +// S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. +// +// The following action is related to GetBucketPolicy: +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicy for usage and error information. 
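+//
+// A minimal sketch, assuming an initialized *s3.S3 client; the returned
+// policy is a JSON document in a *string field, and "examplebucket" is a
+// placeholder:
+//
+//	out, err := client.GetBucketPolicy(&s3.GetBucketPolicyInput{
+//		Bucket: aws.String("examplebucket"),
+//	})
+//	if err == nil {
+//		fmt.Println(aws.StringValue(out.Policy)) // raw JSON policy document
+//	}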
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + return out, req.Send() +} + +// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketPolicyStatus = "GetBucketPolicyStatus" + +// GetBucketPolicyStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicyStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketPolicyStatus for more information on using the GetBucketPolicyStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketPolicyStatusRequest method. +// req, resp := client.GetBucketPolicyStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (req *request.Request, output *GetBucketPolicyStatusOutput) { + op := &request.Operation{ + Name: opGetBucketPolicyStatus, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policyStatus", + } + + if input == nil { + input = &GetBucketPolicyStatusInput{} + } + + output = &GetBucketPolicyStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicyStatus API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Retrieves the policy status for an Amazon S3 bucket, indicating whether the +// bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For more information about when Amazon S3 considers a bucket public, see +// The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). 
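+//
+// An illustrative sketch, assuming an initialized *s3.S3 client and a
+// placeholder bucket name:
+//
+//	out, err := client.GetBucketPolicyStatus(&s3.GetBucketPolicyStatusInput{
+//		Bucket: aws.String("examplebucket"),
+//	})
+//	if err == nil && out.PolicyStatus != nil {
+//		// IsPublic reports whether Amazon S3 considers the bucket public.
+//		fmt.Println(aws.BoolValue(out.PolicyStatus.IsPublic))
+//	}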
+// +// The following operations are related to GetBucketPolicyStatus: +// +// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicyStatus for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatus(input *GetBucketPolicyStatusInput) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + return out, req.Send() +} + +// GetBucketPolicyStatusWithContext is the same as GetBucketPolicyStatus with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicyStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyStatusWithContext(ctx aws.Context, input *GetBucketPolicyStatusInput, opts ...request.Option) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketReplication = "GetBucketReplication" + +// GetBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketReplication for more information on using the GetBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketReplicationRequest method. 
+//	req, resp := client.GetBucketReplicationRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
+func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) {
+	op := &request.Operation{
+		Name:       opGetBucketReplication,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}?replication",
+	}
+
+	if input == nil {
+		input = &GetBucketReplicationInput{}
+	}
+
+	output = &GetBucketReplicationOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetBucketReplication API operation for Amazon Simple Storage Service.
+//
+// This operation is not supported by directory buckets.
+//
+// Returns the replication configuration of a bucket.
+//
+// It can take a while for a replication configuration put or delete to propagate
+// to all Amazon S3 systems. Therefore, a get request soon after a put or delete
+// can return an outdated result.
+//
+// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
+// in the Amazon S3 User Guide.
+//
+// This action requires permissions for the s3:GetReplicationConfiguration action.
+// For more information about permissions, see Using Bucket Policies and User
+// Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
+//
+// If you include the Filter element in a replication configuration, you must
+// also include the DeleteMarkerReplication and Priority elements. The response
+// also returns those elements.
+//
+// For information about GetBucketReplication errors, see List of replication-related
+// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList).
+//
+// The following operations are related to GetBucketReplication:
+//
+//   - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html)
+//
+//   - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketReplication for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
+func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
+	req, out := c.GetBucketReplicationRequest(input)
+	return out, req.Send()
+}
+
+// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketRequestPayment = "GetBucketRequestPayment" + +// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketRequestPayment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketRequestPayment for more information on using the GetBucketRequestPayment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketRequestPaymentRequest method. +// req, resp := client.GetBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opGetBucketRequestPayment, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &GetBucketRequestPaymentInput{} + } + + output = &GetBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the request payment configuration of a bucket. To use this version +// of the operation, you must be the bucket owner. For more information, see +// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to GetBucketRequestPayment: +// +// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketRequestPayment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketTagging = "GetBucketTagging" + +// GetBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketTagging for more information on using the GetBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketTaggingRequest method. +// req, resp := client.GetBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { + op := &request.Operation{ + Name: opGetBucketTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &GetBucketTaggingInput{} + } + + output = &GetBucketTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketTagging API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the tag set associated with the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// GetBucketTagging has the following special error: +// +// - Error code: NoSuchTagSet Description: There is no tag set associated +// with the bucket. +// +// The following operations are related to GetBucketTagging: +// +// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// +// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + return out, req.Send() +} + +// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketTagging for details on how to use this API operation. 
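+//
+// A minimal sketch of the WithContext pattern, assuming the standard library
+// context and time packages are imported alongside an initialized *s3.S3
+// client; "examplebucket" is a placeholder:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	out, err := client.GetBucketTaggingWithContext(ctx, &s3.GetBucketTaggingInput{
+//		Bucket: aws.String("examplebucket"),
+//	})
+//	if err == nil {
+//		for _, tag := range out.TagSet {
+//			fmt.Println(aws.StringValue(tag.Key), aws.StringValue(tag.Value))
+//		}
+//	}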
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketVersioning = "GetBucketVersioning" + +// GetBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketVersioning operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketVersioning for more information on using the GetBucketVersioning +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketVersioningRequest method. +// req, resp := client.GetBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { + op := &request.Operation{ + Name: opGetBucketVersioning, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &GetBucketVersioningInput{} + } + + output = &GetBucketVersioningOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketVersioning API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the versioning state of a bucket. +// +// To retrieve the versioning state of a bucket, you must be the bucket owner. +// +// This implementation also returns the MFA Delete status of the versioning +// state. If the MFA Delete status is enabled, the bucket owner must use an +// authentication device to change the versioning state of the bucket. +// +// The following operations are related to GetBucketVersioning: +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketVersioning for usage and error information. 
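+//
+// A minimal sketch, assuming an initialized *s3.S3 client and a placeholder
+// bucket name:
+//
+//	out, err := client.GetBucketVersioning(&s3.GetBucketVersioningInput{
+//		Bucket: aws.String("examplebucket"),
+//	})
+//	if err == nil {
+//		// Status is empty if versioning has never been enabled on the bucket.
+//		fmt.Println(aws.StringValue(out.Status), aws.StringValue(out.MFADelete))
+//	}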
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + return out, req.Send() +} + +// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketWebsite = "GetBucketWebsite" + +// GetBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketWebsite for more information on using the GetBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketWebsiteRequest method. +// req, resp := client.GetBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { + op := &request.Operation{ + Name: opGetBucketWebsite, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &GetBucketWebsiteInput{} + } + + output = &GetBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketWebsite API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the website configuration for a bucket. To host website on Amazon +// S3, you can configure a bucket as website by adding a website configuration. +// For more information about hosting websites, see Hosting Websites on Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// This GET action requires the S3:GetBucketWebsite permission. By default, +// only the bucket owner can read the bucket website configuration. However, +// bucket owners can allow other users to read the website configuration by +// writing a bucket policy granting them the S3:GetBucketWebsite permission. 
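+//
+// An illustrative sketch, assuming an initialized *s3.S3 client and a
+// placeholder bucket name:
+//
+//	out, err := client.GetBucketWebsite(&s3.GetBucketWebsiteInput{
+//		Bucket: aws.String("examplebucket"),
+//	})
+//	if err == nil && out.IndexDocument != nil {
+//		fmt.Println(aws.StringValue(out.IndexDocument.Suffix)) // e.g. "index.html"
+//	}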
+// +// The following operations are related to GetBucketWebsite: +// +// - DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) +// +// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketWebsite for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + return out, req.Send() +} + +// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObject = "GetObject" + +// GetObjectRequest generates a "aws/request.Request" representing the +// client's request for the GetObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObject for more information on using the GetObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetObjectRequest method. +// req, resp := client.GetObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { + op := &request.Operation{ + Name: opGetObject, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &GetObjectInput{} + } + + output = &GetObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObject API operation for Amazon Simple Storage Service. +// +// Retrieves an object from Amazon S3. +// +// In the GetObject request, specify the full key name for the object. +// +// General purpose buckets - Both the virtual-hosted-style requests and the +// path-style requests are supported. For a virtual hosted-style request example, +// if you have the object photos/2006/February/sample.jpg, specify the object +// key name as /photos/2006/February/sample.jpg. 
For a path-style request example, +// if you have the object photos/2006/February/sample.jpg in the bucket named +// examplebucket, specify the object key name as /examplebucket/photos/2006/February/sample.jpg. +// For more information about request types, see HTTP Host Header Bucket Specification +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket) +// in the Amazon S3 User Guide. +// +// Directory buckets - Only virtual-hosted-style requests are supported. For +// a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg +// in the bucket named examplebucket--use1-az5--x-s3, specify the object key +// name as /photos/2006/February/sample.jpg. Also, when you make requests to +// this API operation, your requests are sent to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - You must have the required permissions +// in a policy. To use GetObject, you must have the READ access to the object +// (or version). If you grant READ access to the anonymous user, the GetObject +// operation returns the object without using an authorization header. For +// more information, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. If you include a versionId in your request +// header, you must have the s3:GetObjectVersion permission to access a specific +// version of an object. The s3:GetObject permission is not required in this +// scenario. If you request the current version of an object without a specific +// versionId in the request header, only the s3:GetObject permission is required. +// The s3:GetObjectVersion permission is not required in this scenario. If +// the object that you request doesn’t exist, the error that Amazon S3 +// returns depends on whether you also have the s3:ListBucket permission. +// If you have the s3:ListBucket permission on the bucket, Amazon S3 returns +// an HTTP status code 404 Not Found error. If you don’t have the s3:ListBucket +// permission, Amazon S3 returns an HTTP status code 403 Access Denied error. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. 
For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # Storage classes +// +// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval +// storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering +// Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, +// before you can retrieve the object you must first restore a copy using RestoreObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// Otherwise, this operation returns an InvalidObjectState error. For information +// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, only the S3 Express One Zone storage +// class is supported to store newly created objects. Unsupported storage class +// values won't write a destination object and will respond with the HTTP status +// code 400 Bad Request. +// +// # Encryption +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for the GetObject requests, if your object uses server-side encryption +// with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with +// Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption +// with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in +// your GetObject requests for the object that uses these types of keys, you’ll +// get an HTTP 400 Bad Request error. +// +// # Overriding response header values through the request +// +// There are times when you want to override certain response header values +// of a GetObject response. For example, you might override the Content-Disposition +// response header value through your GetObject request. +// +// You can override values for a set of response headers. These modified response +// header values are included only in a successful response, that is, when the +// HTTP status code 200 OK is returned. The headers you can override using the +// following query parameters in the request are a subset of the headers that +// Amazon S3 accepts when you create an object. +// +// The response headers that you can override for the GetObject response are +// Cache-Control, Content-Disposition, Content-Encoding, Content-Language, Content-Type, +// and Expires. +// +// To override values for a set of response headers in the GetObject response, +// you can use the following query parameters in the request. +// +// - response-cache-control +// +// - response-content-disposition +// +// - response-content-encoding +// +// - response-content-language +// +// - response-content-type +// +// - response-expires +// +// When you use these parameters, you must sign the request by using either +// an Authorization header or a presigned URL. These parameters cannot be used +// with an unsigned (anonymous) request. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following operations are related to GetObject: +// +// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// +// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObject for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// - ErrCodeInvalidObjectState "InvalidObjectState" +// Object is archived and inaccessible until restored. +// +// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval +// storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering +// Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, +// before you can retrieve the object you must first restore a copy using RestoreObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// Otherwise, this operation returns an InvalidObjectState error. For information +// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon S3 User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + return out, req.Send() +} + +// GetObjectWithContext is the same as GetObject with the addition of +// the ability to pass a context and additional request options. +// +// See GetObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectAcl = "GetObjectAcl" + +// GetObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectAcl for more information on using the GetObjectAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetObjectAclRequest method. 
+// req, resp := client.GetObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { + op := &request.Operation{ + Name: opGetObjectAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &GetObjectAclInput{} + } + + output = &GetObjectAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectAcl API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the access control list (ACL) of an object. To use this operation, +// you must have s3:GetObjectAcl permissions or READ_ACP access to the object. +// For more information, see Mapping of ACL permissions and access policy permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping) +// in the Amazon S3 User Guide +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// By default, GET returns ACL information about the current version of an object. +// To return ACL information about a different version, use the versionId subresource. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// requests to read ACLs are still supported and return the bucket-owner-full-control +// ACL with the owner being the account that created the bucket. For more information, +// see Controlling object ownership and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// The following operations are related to GetObjectAcl: +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectAcl for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + return out, req.Send() +} + +// GetObjectAclWithContext is the same as GetObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
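+//
+// // A hedged sketch of the context pattern described above; "svc" and the
+// // bucket/key names are assumptions, and context and time must be imported.
+// ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+// defer cancel()
+// out, err := svc.GetObjectAclWithContext(ctx, &s3.GetObjectAclInput{
+// Bucket: aws.String("amzn-s3-demo-bucket"),
+// Key: aws.String("photos/sample.jpg"),
+// })
+// if err == nil {
+// for _, grant := range out.Grants {
+// fmt.Println(aws.StringValue(grant.Permission))
+// }
+// }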
+func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectAttributes = "GetObjectAttributes" + +// GetObjectAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectAttributes for more information on using the GetObjectAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetObjectAttributesRequest method. +// req, resp := client.GetObjectAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributes +func (c *S3) GetObjectAttributesRequest(input *GetObjectAttributesInput) (req *request.Request, output *GetObjectAttributesOutput) { + op := &request.Operation{ + Name: opGetObjectAttributes, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?attributes", + } + + if input == nil { + input = &GetObjectAttributesInput{} + } + + output = &GetObjectAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectAttributes API operation for Amazon Simple Storage Service. +// +// Retrieves all the metadata from an object without returning the object itself. +// This operation is useful if you're interested only in an object's metadata. +// +// GetObjectAttributes combines the functionality of HeadObject and ListParts. +// All of the data returned with each of those individual calls can be returned +// with a single call to GetObjectAttributes. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - To use GetObjectAttributes, you +// must have READ access to the object. The permissions that you need to +// use this operation with depend on whether the bucket is versioned. If +// the bucket is versioned, you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes +// permissions for this operation. If the bucket is not versioned, you need +// the s3:GetObject and s3:GetObjectAttributes permissions. For more information, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. If the object that you request does not exist, +// the error Amazon S3 returns depends on whether you also have the s3:ListBucket +// permission. 
If you have the s3:ListBucket permission on the bucket, Amazon +// S3 returns an HTTP status code 404 Not Found ("no such key") error. If +// you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Forbidden ("access denied") error. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # Encryption +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for HEAD requests if your object uses server-side encryption with +// Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption +// with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with +// Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption +// header is used when you PUT an object to S3 and want to specify the encryption +// method. If you include this header in a GET request for an object that uses +// these types of keys, you’ll get an HTTP 400 Bad Request error. It's because +// the encryption method can't be changed when you retrieve the object. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when +// you retrieve the metadata from the object, you must use the following headers +// to provide the encryption key for the server to be able to retrieve the object's +// metadata. The headers are: +// +// - x-amz-server-side-encryption-customer-algorithm +// +// - x-amz-server-side-encryption-customer-key +// +// - x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. +// +// Directory bucket permissions - For directory buckets, only server-side encryption +// with Amazon S3 managed keys (SSE-S3) (AES256) is supported. +// +// # Versioning +// +// Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. For this API operation, only the null value of the version ID is +// supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. 
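+//
+// // An illustrative sketch ("svc", bucket, and key are assumptions): the
+// // required ObjectAttributes list selects which attributes are returned.
+// out, err := svc.GetObjectAttributes(&s3.GetObjectAttributesInput{
+// Bucket: aws.String("amzn-s3-demo-bucket"),
+// Key: aws.String("photos/sample.jpg"),
+// ObjectAttributes: aws.StringSlice([]string{
+// s3.ObjectAttributesEtag,
+// s3.ObjectAttributesObjectSize,
+// }),
+// })
+// if err == nil {
+// fmt.Println(aws.Int64Value(out.ObjectSize))
+// }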
+// +// # Conditional request headers +// +// Consider the following when using request headers: +// +// - If both of the If-Match and If-Unmodified-Since headers are present +// in the request as follows, then Amazon S3 returns the HTTP status code +// 200 OK and the data requested: If-Match condition evaluates to true. If-Unmodified-Since +// condition evaluates to false. For more information about conditional requests, +// see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// - If both of the If-None-Match and If-Modified-Since headers are present +// in the request as follows, then Amazon S3 returns the HTTP status code +// 304 Not Modified: If-None-Match condition evaluates to false. If-Modified-Since +// condition evaluates to true. For more information about conditional requests, +// see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following actions are related to GetObjectAttributes: +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// - GetObjectLegalHold (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html) +// +// - GetObjectLockConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html) +// +// - GetObjectRetention (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html) +// +// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// +// - HeadObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html) +// +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectAttributes for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributes +func (c *S3) GetObjectAttributes(input *GetObjectAttributesInput) (*GetObjectAttributesOutput, error) { + req, out := c.GetObjectAttributesRequest(input) + return out, req.Send() +} + +// GetObjectAttributesWithContext is the same as GetObjectAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectAttributesWithContext(ctx aws.Context, input *GetObjectAttributesInput, opts ...request.Option) (*GetObjectAttributesOutput, error) { + req, out := c.GetObjectAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetObjectLegalHold = "GetObjectLegalHold" + +// GetObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectLegalHold for more information on using the GetObjectLegalHold +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetObjectLegalHoldRequest method. +// req, resp := client.GetObjectLegalHoldRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *request.Request, output *GetObjectLegalHoldOutput) { + op := &request.Operation{ + Name: opGetObjectLegalHold, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", + } + + if input == nil { + input = &GetObjectLegalHoldInput{} + } + + output = &GetObjectLegalHoldOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectLegalHold API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Gets an object's current legal hold status. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// The following action is related to GetObjectLegalHold: +// +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHold(input *GetObjectLegalHoldInput) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) + return out, req.Send() +} + +// GetObjectLegalHoldWithContext is the same as GetObjectLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectLegalHoldWithContext(ctx aws.Context, input *GetObjectLegalHoldInput, opts ...request.Option) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetObjectLockConfiguration = "GetObjectLockConfiguration" + +// GetObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLockConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectLockConfiguration for more information on using the GetObjectLockConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetObjectLockConfigurationRequest method. +// req, resp := client.GetObjectLockConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfigurationInput) (req *request.Request, output *GetObjectLockConfigurationOutput) { + op := &request.Operation{ + Name: opGetObjectLockConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?object-lock", + } + + if input == nil { + input = &GetObjectLockConfigurationInput{} + } + + output = &GetObjectLockConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectLockConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Gets the Object Lock configuration for a bucket. The rule specified in the +// Object Lock configuration will be applied by default to every new object +// placed in the specified bucket. For more information, see Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// The following action is related to GetObjectLockConfiguration: +// +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectLockConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfiguration(input *GetObjectLockConfigurationInput) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + return out, req.Send() +} + +// GetObjectLockConfigurationWithContext is the same as GetObjectLockConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectLockConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
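+//
+// // A brief illustrative sketch ("ctx", "svc", and the bucket are assumed):
+// out, err := svc.GetObjectLockConfigurationWithContext(ctx, &s3.GetObjectLockConfigurationInput{
+// Bucket: aws.String("amzn-s3-demo-bucket"),
+// })
+// if err == nil && out.ObjectLockConfiguration != nil {
+// fmt.Println(aws.StringValue(out.ObjectLockConfiguration.ObjectLockEnabled))
+// }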
+func (c *S3) GetObjectLockConfigurationWithContext(ctx aws.Context, input *GetObjectLockConfigurationInput, opts ...request.Option) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectRetention = "GetObjectRetention" + +// GetObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectRetention for more information on using the GetObjectRetention +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetObjectRetentionRequest method. +// req, resp := client.GetObjectRetentionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *request.Request, output *GetObjectRetentionOutput) { + op := &request.Operation{ + Name: opGetObjectRetention, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?retention", + } + + if input == nil { + input = &GetObjectRetentionInput{} + } + + output = &GetObjectRetentionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectRetention API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Retrieves an object's retention settings. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// The following action is related to GetObjectRetention: +// +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetention(input *GetObjectRetentionInput) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + return out, req.Send() +} + +// GetObjectRetentionWithContext is the same as GetObjectRetention with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
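+//
+// // An illustrative sketch ("ctx", "svc", bucket, and key are assumptions):
+// out, err := svc.GetObjectRetentionWithContext(ctx, &s3.GetObjectRetentionInput{
+// Bucket: aws.String("amzn-s3-demo-bucket"),
+// Key: aws.String("records/ledger.csv"),
+// })
+// if err == nil && out.Retention != nil {
+// fmt.Println(aws.StringValue(out.Retention.Mode), aws.TimeValue(out.Retention.RetainUntilDate))
+// }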
+func (c *S3) GetObjectRetentionWithContext(ctx aws.Context, input *GetObjectRetentionInput, opts ...request.Option) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectTagging = "GetObjectTagging" + +// GetObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectTagging for more information on using the GetObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetObjectTaggingRequest method. +// req, resp := client.GetObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { + op := &request.Operation{ + Name: opGetObjectTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &GetObjectTaggingInput{} + } + + output = &GetObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectTagging API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the tag-set of an object. You send the GET request against the tagging +// subresource associated with the object. +// +// To use this operation, you must have permission to perform the s3:GetObjectTagging +// action. By default, the GET action returns information about current version +// of an object. For a versioned bucket, you can have multiple versions of an +// object in your bucket. To retrieve tags of any other version, use the versionId +// query parameter. You also need permission for the s3:GetObjectVersionTagging +// action. +// +// By default, the bucket owner has this permission and can grant this permission +// to others. +// +// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// The following actions are related to GetObjectTagging: +// +// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTagging for usage and error information. 
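+//
+// // An illustrative sketch (names assumed) that also shows the awserr type
+// // assertion mentioned above; the awserr package must be imported.
+// out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
+// Bucket: aws.String("amzn-s3-demo-bucket"),
+// Key: aws.String("photos/sample.jpg"),
+// })
+// if aerr, ok := err.(awserr.Error); ok {
+// fmt.Println(aerr.Code(), aerr.Message())
+// } else if err == nil {
+// for _, tag := range out.TagSet {
+// fmt.Println(aws.StringValue(tag.Key), "=", aws.StringValue(tag.Value))
+// }
+// }
+//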
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + return out, req.Send() +} + +// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectTorrent = "GetObjectTorrent" + +// GetObjectTorrentRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTorrent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectTorrent for more information on using the GetObjectTorrent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetObjectTorrentRequest method. +// req, resp := client.GetObjectTorrentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { + op := &request.Operation{ + Name: opGetObjectTorrent, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", + } + + if input == nil { + input = &GetObjectTorrentInput{} + } + + output = &GetObjectTorrentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectTorrent API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns torrent files from a bucket. BitTorrent can save you bandwidth when +// you're distributing large files. +// +// You can get torrent only for objects that are less than 5 GB in size, and +// that are not encrypted using server-side encryption with a customer-provided +// encryption key. +// +// To use GET, you must have READ access to the object. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// The following action is related to GetObjectTorrent: +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTorrent for usage and error information. 
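+//
+// // A minimal illustrative sketch (names assumed; io and os imported); the
+// // response Body streams the .torrent file and should be closed.
+// out, err := svc.GetObjectTorrent(&s3.GetObjectTorrentInput{
+// Bucket: aws.String("amzn-s3-demo-bucket"),
+// Key: aws.String("large-file.iso"),
+// })
+// if err == nil {
+// defer out.Body.Close()
+// io.Copy(os.Stdout, out.Body)
+// }
+//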
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + return out, req.Send() +} + +// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTorrent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetPublicAccessBlock = "GetPublicAccessBlock" + +// GetPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the GetPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetPublicAccessBlock for more information on using the GetPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetPublicAccessBlockRequest method. +// req, resp := client.GetPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req *request.Request, output *GetPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opGetPublicAccessBlock, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &GetPublicAccessBlockInput{} + } + + output = &GetPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To +// use this operation, you must have the s3:GetBucketPublicAccessBlock permission. +// For more information about Amazon S3 permissions, see Specifying Permissions +// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock settings are different between the bucket and the +// account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. 
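+//
+// // An illustrative sketch ("svc" and the bucket are assumptions) of
+// // inspecting the bucket-level settings described above.
+// out, err := svc.GetPublicAccessBlock(&s3.GetPublicAccessBlockInput{
+// Bucket: aws.String("amzn-s3-demo-bucket"),
+// })
+// if err == nil {
+// cfg := out.PublicAccessBlockConfiguration
+// fmt.Println(aws.BoolValue(cfg.BlockPublicAcls), aws.BoolValue(cfg.RestrictPublicBuckets))
+// }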
+// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to GetPublicAccessBlock: +// +// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlock(input *GetPublicAccessBlockInput) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + return out, req.Send() +} + +// GetPublicAccessBlockWithContext is the same as GetPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See GetPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetPublicAccessBlockWithContext(ctx aws.Context, input *GetPublicAccessBlockInput, opts ...request.Option) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a "aws/request.Request" representing the +// client's request for the HeadBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See HeadBucket for more information on using the HeadBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the HeadBucketRequest method. 
+// req, resp := client.HeadBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { + op := &request.Operation{ + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &HeadBucketInput{} + } + + output = &HeadBucketOutput{} + req = c.newRequest(op, input, output) + return +} + +// HeadBucket API operation for Amazon Simple Storage Service. +// +// You can use this operation to determine if a bucket exists and if you have +// permission to access it. The action returns a 200 OK if the bucket exists +// and you have permission to access it. +// +// If the bucket does not exist or you do not have permission to access it, +// the HEAD request returns a generic 400 Bad Request, 403 Forbidden or 404 +// Not Found code. A message body is not included, so you cannot determine the +// exception beyond these HTTP response codes. +// +// Directory buckets - You must make requests for this API operation to the +// Zonal endpoint. These endpoints support virtual-hosted-style requests in +// the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Authentication and authorization +// +// All HeadBucket requests must be authenticated and signed by using IAM credentials +// (access key ID and secret access key for the IAM identities). All headers +// with the x-amz- prefix, including x-amz-copy-source, must be signed. For +// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). +// +// Directory bucket - You must use IAM credentials to authenticate and authorize +// your access to the HeadBucket API operation, instead of using the temporary +// security credentials through the CreateSession API operation. +// +// Amazon Web Services CLI or SDKs handles authentication and authorization +// on your behalf. +// +// Permissions +// +// - General purpose bucket permissions - To use this operation, you must +// have permissions to perform the s3:ListBucket action. The bucket owner +// has this permission by default and can grant this permission to others. +// For more information about permissions, see Managing access permissions +// to your Amazon S3 resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - You must have the s3express:CreateSession +// permission in the Action element of a policy. By default, the session +// is in the ReadWrite mode. If you want to restrict the access, you can +// explicitly set the s3express:SessionMode condition key to ReadOnly on +// the bucket. 
For more information about example bucket policies, see Example +// bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadBucket for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + return out, req.Send() +} + +// HeadBucketWithContext is the same as HeadBucket with the addition of +// the ability to pass a context and additional request options. +// +// See HeadBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a "aws/request.Request" representing the +// client's request for the HeadObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See HeadObject for more information on using the HeadObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the HeadObjectRequest method. +// req, resp := client.HeadObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { + op := &request.Operation{ + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &HeadObjectInput{} + } + + output = &HeadObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// HeadObject API operation for Amazon Simple Storage Service. 
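+//
+// // A brief, hypothetical sketch of a typical call ("svc" and the object
+// // names are assumptions); the operation's behavior and required
+// // permissions are described below.
+// out, err := svc.HeadObject(&s3.HeadObjectInput{
+// Bucket: aws.String("amzn-s3-demo-bucket"),
+// Key: aws.String("photos/sample.jpg"),
+// })
+// if err == nil {
+// fmt.Println(aws.Int64Value(out.ContentLength), aws.TimeValue(out.LastModified))
+// }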
+// +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're interested only in an object's +// metadata. +// +// A HEAD request has the same options as a GET operation on an object. The +// response is identical to the GET response except that there is no response +// body. Because of this, if the HEAD request generates an error, it returns +// a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not Found, 405 +// Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not +// possible to retrieve the exact exception of these error codes. +// +// Request headers are limited to 8 KB in size. For more information, see Common +// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - To use HEAD, you must have the +// s3:GetObject permission. You need the relevant read object (or version) +// permission for this operation. For more information, see Actions, resources, +// and condition keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html) +// in the Amazon S3 User Guide. If the object you request doesn't exist, +// the error that Amazon S3 returns depends on whether you also have the +// s3:ListBucket permission. If you have the s3:ListBucket permission on +// the bucket, Amazon S3 returns an HTTP status code 404 Not Found error. +// If you don’t have the s3:ListBucket permission, Amazon S3 returns an +// HTTP status code 403 Forbidden error. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # Encryption +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for HEAD requests if your object uses server-side encryption with +// Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption +// with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with +// Amazon S3 managed encryption keys (SSE-S3). 
The x-amz-server-side-encryption
+// header is used when you PUT an object to S3 and want to specify the encryption
+// method. If you include this header in a HEAD request for an object that uses
+// these types of keys, you’ll get an HTTP 400 Bad Request error. This is because
+// the encryption method can't be changed when you retrieve the object.
+//
+// If you encrypt an object by using server-side encryption with customer-provided
+// encryption keys (SSE-C) when you store the object in Amazon S3, then when
+// you retrieve the metadata from the object, you must use the following headers
+// to provide the encryption key for the server to be able to retrieve the object's
+// metadata. The headers are:
+//
+//   - x-amz-server-side-encryption-customer-algorithm
+//
+//   - x-amz-server-side-encryption-customer-key
+//
+//   - x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
+// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
+// in the Amazon S3 User Guide.
+//
+// Directory bucket permissions - For directory buckets, only server-side encryption
+// with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
+//
+// Versioning
+//
+//   - If the current version of the object is a delete marker, Amazon S3 behaves
+//     as if the object was deleted and includes x-amz-delete-marker: true in
+//     the response.
+//
+//   - If the specified version is a delete marker, the response returns a
+//     405 Method Not Allowed error and the Last-Modified: timestamp response
+//     header.
+//
+//   - Directory buckets - Delete markers are not supported by directory buckets.
+//
+//   - Directory buckets - S3 Versioning isn't enabled or supported for directory
+//     buckets. For this API operation, only the null value of the version ID
+//     is supported by directory buckets. You can only specify null as the versionId
+//     query parameter in the request.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
+//
+// The following actions are related to HeadObject:
+//
+//   - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+//   - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html)
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses
+// for more information on returned errors.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation HeadObject for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
+func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) {
+	req, out := c.HeadObjectRequest(input)
+	return out, req.Send()
+}
+
+// HeadObjectWithContext is the same as HeadObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See HeadObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests.
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" + +// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. +// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketAnalyticsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &ListBucketAnalyticsConfigurationsInput{} + } + + output = &ListBucketAnalyticsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Lists the analytics configurations for the bucket. You can have up to 1,000 +// analytics configurations per bucket. +// +// This action supports list pagination and does not return more than 100 configurations +// at a time. You should always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. +// If there are more configurations to list, IsTruncated is set to true, and +// there will be a value in NextContinuationToken. You use the NextContinuationToken +// value to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). 
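+//
+// Because the Operation above defines no Paginator, this operation has no
+// Pages helper and the continuation-token pagination described earlier is
+// driven by hand. A minimal sketch (not generated SDK documentation), assuming
+// an existing client "svc" and a hypothetical bucket named "my-bucket":
+//
+//	var token *string
+//	for {
+//	    out, err := svc.ListBucketAnalyticsConfigurations(&s3.ListBucketAnalyticsConfigurationsInput{
+//	        Bucket:            aws.String("my-bucket"),
+//	        ContinuationToken: token,
+//	    })
+//	    if err != nil {
+//	        return err
+//	    }
+//	    for _, cfg := range out.AnalyticsConfigurationList {
+//	        fmt.Println(aws.StringValue(cfg.Id))
+//	    }
+//	    if !aws.BoolValue(out.IsTruncated) {
+//	        break // no more configurations to list
+//	    }
+//	    token = out.NextContinuationToken // pass as continuation-token on the next GET
+//	}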
+// +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// The following operations are related to ListBucketAnalyticsConfigurations: +// +// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketAnalyticsConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketAnalyticsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketIntelligentTieringConfigurations = "ListBucketIntelligentTieringConfigurations" + +// ListBucketIntelligentTieringConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketIntelligentTieringConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketIntelligentTieringConfigurations for more information on using the ListBucketIntelligentTieringConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListBucketIntelligentTieringConfigurationsRequest method. 
+// req, resp := client.ListBucketIntelligentTieringConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations +func (c *S3) ListBucketIntelligentTieringConfigurationsRequest(input *ListBucketIntelligentTieringConfigurationsInput) (req *request.Request, output *ListBucketIntelligentTieringConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketIntelligentTieringConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &ListBucketIntelligentTieringConfigurationsInput{} + } + + output = &ListBucketIntelligentTieringConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketIntelligentTieringConfigurations API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Lists the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput +// access tiers. To get the lowest storage cost on data that can be accessed +// in minutes to hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not monitored and not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the Frequent Access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to ListBucketIntelligentTieringConfigurations include: +// +// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketIntelligentTieringConfigurations for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations
+func (c *S3) ListBucketIntelligentTieringConfigurations(input *ListBucketIntelligentTieringConfigurationsInput) (*ListBucketIntelligentTieringConfigurationsOutput, error) {
+	req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input)
+	return out, req.Send()
+}
+
+// ListBucketIntelligentTieringConfigurationsWithContext is the same as ListBucketIntelligentTieringConfigurations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBucketIntelligentTieringConfigurations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketIntelligentTieringConfigurationsWithContext(ctx aws.Context, input *ListBucketIntelligentTieringConfigurationsInput, opts ...request.Option) (*ListBucketIntelligentTieringConfigurationsOutput, error) {
+	req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations"
+
+// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBucketInventoryConfigurations operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//	// Example sending a request using the ListBucketInventoryConfigurationsRequest method.
+//	req, resp := client.ListBucketInventoryConfigurationsRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
+func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) {
+	op := &request.Operation{
+		Name:       opListBucketInventoryConfigurations,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}?inventory",
+	}
+
+	if input == nil {
+		input = &ListBucketInventoryConfigurationsInput{}
+	}
+
+	output = &ListBucketInventoryConfigurationsOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service.
+//
+// This operation is not supported by directory buckets.
+//
+// Returns a list of inventory configurations for the bucket. You can have up
+// to 1,000 inventory configurations per bucket.
+//
+// This action supports list pagination and does not return more than 100 configurations
+// at a time. Always check the IsTruncated element in the response.
If there +// are no more configurations to list, IsTruncated is set to false. If there +// are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken. You use the NextContinuationToken value +// to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// +// The following operations are related to ListBucketInventoryConfigurations: +// +// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketInventoryConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketInventoryConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" + +// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketMetricsConfigurations operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListBucketMetricsConfigurationsRequest method. +// req, resp := client.ListBucketMetricsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketMetricsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &ListBucketMetricsConfigurationsInput{} + } + + output = &ListBucketMetricsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Lists the metrics configurations for the bucket. The metrics configurations +// are only for the request metrics of the bucket and do not provide information +// on daily storage metrics. You can have up to 1,000 configurations per bucket. +// +// This action supports list pagination and does not return more than 100 configurations +// at a time. Always check the IsTruncated element in the response. If there +// are no more configurations to list, IsTruncated is set to false. If there +// are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken. You use the NextContinuationToken value +// to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For more information about metrics configurations and CloudWatch request +// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). 
+// +// The following operations are related to ListBucketMetricsConfigurations: +// +// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// +// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketMetricsConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketMetricsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBuckets = "ListBuckets" + +// ListBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuckets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBuckets for more information on using the ListBuckets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListBucketsRequest method. +// req, resp := client.ListBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { + op := &request.Operation{ + Name: opListBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + } + + if input == nil { + input = &ListBucketsInput{} + } + + output = &ListBucketsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBuckets API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. 
+// +// Returns a list of all buckets owned by the authenticated sender of the request. +// To use this operation, you must have the s3:ListAllMyBuckets permission. +// +// For information about Amazon S3 buckets, see Creating, configuring, and working +// with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBuckets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + return out, req.Send() +} + +// ListBucketsWithContext is the same as ListBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListDirectoryBuckets = "ListDirectoryBuckets" + +// ListDirectoryBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListDirectoryBuckets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDirectoryBuckets for more information on using the ListDirectoryBuckets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListDirectoryBucketsRequest method. +// req, resp := client.ListDirectoryBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListDirectoryBuckets +func (c *S3) ListDirectoryBucketsRequest(input *ListDirectoryBucketsInput) (req *request.Request, output *ListDirectoryBucketsOutput) { + op := &request.Operation{ + Name: opListDirectoryBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"ContinuationToken"}, + LimitToken: "MaxDirectoryBuckets", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDirectoryBucketsInput{} + } + + output = &ListDirectoryBucketsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDirectoryBuckets API operation for Amazon Simple Storage Service. +// +// Returns a list of all Amazon S3 directory buckets owned by the authenticated +// sender of the request. 
For more information about directory buckets, see +// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// You must have the s3express:ListAllMyDirectoryBuckets permission in an IAM +// identity-based policy instead of a bucket policy. Cross-account access to +// this API operation isn't supported. This operation can only be performed +// by the Amazon Web Services account that owns the resource. For more information +// about directory bucket policies and permissions, see Amazon Web Services +// Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListDirectoryBuckets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListDirectoryBuckets +func (c *S3) ListDirectoryBuckets(input *ListDirectoryBucketsInput) (*ListDirectoryBucketsOutput, error) { + req, out := c.ListDirectoryBucketsRequest(input) + return out, req.Send() +} + +// ListDirectoryBucketsWithContext is the same as ListDirectoryBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListDirectoryBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListDirectoryBucketsWithContext(ctx aws.Context, input *ListDirectoryBucketsInput, opts ...request.Option) (*ListDirectoryBucketsOutput, error) { + req, out := c.ListDirectoryBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDirectoryBucketsPages iterates over the pages of a ListDirectoryBuckets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDirectoryBuckets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDirectoryBuckets operation. 
+// pageNum := 0 +// err := client.ListDirectoryBucketsPages(params, +// func(page *s3.ListDirectoryBucketsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *S3) ListDirectoryBucketsPages(input *ListDirectoryBucketsInput, fn func(*ListDirectoryBucketsOutput, bool) bool) error { + return c.ListDirectoryBucketsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDirectoryBucketsPagesWithContext same as ListDirectoryBucketsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListDirectoryBucketsPagesWithContext(ctx aws.Context, input *ListDirectoryBucketsInput, fn func(*ListDirectoryBucketsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDirectoryBucketsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDirectoryBucketsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDirectoryBucketsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListMultipartUploads = "ListMultipartUploads" + +// ListMultipartUploadsRequest generates a "aws/request.Request" representing the +// client's request for the ListMultipartUploads operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListMultipartUploads for more information on using the ListMultipartUploads +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListMultipartUploadsRequest method. +// req, resp := client.ListMultipartUploadsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { + op := &request.Operation{ + Name: opListMultipartUploads, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?uploads", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "UploadIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, + LimitToken: "MaxUploads", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListMultipartUploadsInput{} + } + + output = &ListMultipartUploadsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListMultipartUploads API operation for Amazon Simple Storage Service. +// +// This operation lists in-progress multipart uploads in a bucket. An in-progress +// multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload +// request, but has not yet been completed or aborted. 
+// +// Directory buckets - If multipart uploads in a directory bucket are in progress, +// you can't delete the bucket until all the in-progress multipart uploads are +// aborted or completed. +// +// The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads +// in the response. The limit of 1,000 multipart uploads is also the default +// value. You can further limit the number of uploads in a response by specifying +// the max-uploads request parameter. If there are more than 1,000 multipart +// uploads that satisfy your ListMultipartUploads request, the response returns +// an IsTruncated element with the value of true, a NextKeyMarker element, and +// a NextUploadIdMarker element. To list the remaining multipart uploads, you +// need to make subsequent ListMultipartUploads requests. In these requests, +// include two query parameters: key-marker and upload-id-marker. Set the value +// of key-marker to the NextKeyMarker value from the previous response. Similarly, +// set the value of upload-id-marker to the NextUploadIdMarker value from the +// previous response. +// +// Directory buckets - The upload-id-marker element and the NextUploadIdMarker +// element aren't supported by directory buckets. To list the additional multipart +// uploads, you only need to set the value of key-marker to the NextKeyMarker +// value from the previous response. +// +// For more information about multipart uploads, see Uploading Objects Using +// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). 
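+//
+// The marker-based pagination described earlier can be driven by hand; the
+// ListMultipartUploadsPages helper below wraps the same loop. A sketch,
+// assuming an existing client "svc" and a hypothetical general purpose
+// bucket (directory buckets would carry forward only the key marker):
+//
+//	in := &s3.ListMultipartUploadsInput{Bucket: aws.String("my-bucket")}
+//	for {
+//	    out, err := svc.ListMultipartUploads(in)
+//	    if err != nil {
+//	        return err
+//	    }
+//	    for _, u := range out.Uploads {
+//	        fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId))
+//	    }
+//	    if !aws.BoolValue(out.IsTruncated) {
+//	        break
+//	    }
+//	    // Carry both markers forward, as described above.
+//	    in.KeyMarker = out.NextKeyMarker
+//	    in.UploadIdMarker = out.NextUploadIdMarker
+//	}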
+// +// Sorting of multipart uploads in response +// +// - General purpose bucket - In the ListMultipartUploads response, the multipart +// uploads are sorted based on two criteria: Key-based sorting - Multipart +// uploads are initially sorted in ascending order based on their object +// keys. Time-based sorting - For uploads that share the same object key, +// they are further sorted in ascending order based on the upload initiation +// time. Among uploads with the same key, the one that was initiated first +// will appear before the ones that were initiated later. +// +// - Directory bucket - In the ListMultipartUploads response, the multipart +// uploads aren't sorted lexicographically based on the object keys. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following operations are related to ListMultipartUploads: +// +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListMultipartUploads for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + return out, req.Send() +} + +// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of +// the ability to pass a context and additional request options. +// +// See ListMultipartUploads for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMultipartUploads method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMultipartUploads operation. 
+// pageNum := 0 +// err := client.ListMultipartUploadsPages(params, +// func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error { + return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMultipartUploadsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMultipartUploadsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjectVersions = "ListObjectVersions" + +// ListObjectVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjectVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListObjectVersions for more information on using the ListObjectVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListObjectVersionsRequest method. +// req, resp := client.ListObjectVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { + op := &request.Operation{ + Name: opListObjectVersions, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectVersionsInput{} + } + + output = &ListObjectVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectVersions API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns metadata about all versions of the objects in a bucket. You can also +// use request parameters as selection criteria to return metadata about a subset +// of all the object versions. 
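+//
+// Both object versions and delete markers come back in the response, in the
+// Versions and DeleteMarkers fields of the output. A sketch that prints each,
+// assuming an existing client "svc" and a hypothetical bucket name:
+//
+//	out, err := svc.ListObjectVersions(&s3.ListObjectVersionsInput{
+//	    Bucket: aws.String("my-bucket"),
+//	})
+//	if err == nil {
+//	    for _, v := range out.Versions {
+//	        fmt.Println("version:", aws.StringValue(v.Key), aws.StringValue(v.VersionId))
+//	    }
+//	    for _, dm := range out.DeleteMarkers {
+//	        fmt.Println("delete marker:", aws.StringValue(dm.Key), aws.StringValue(dm.VersionId))
+//	    }
+//	}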
+// +// To use this operation, you must have permission to perform the s3:ListBucketVersions +// action. Be aware of the name difference. +// +// A 200 OK response can contain valid or invalid XML. Make sure to design your +// application to parse the contents of the response and handle it appropriately. +// +// To use this operation, you must have READ access to the bucket. +// +// The following operations are related to ListObjectVersions: +// +// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectVersions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + return out, req.Send() +} + +// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectVersions operation. +// pageNum := 0 +// err := client.ListObjectVersionsPages(params, +// func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error { + return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
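+//
+// A sketch of bounding the whole multi-page iteration with a deadline,
+// assuming an existing client "svc" and a hypothetical bucket name; a
+// standard context.Context satisfies aws.Context:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	err := svc.ListObjectVersionsPagesWithContext(ctx,
+//	    &s3.ListObjectVersionsInput{Bucket: aws.String("my-bucket")},
+//	    func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
+//	        fmt.Println(len(page.Versions), "versions on this page")
+//	        return true // keep paging until lastPage
+//	    })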
+func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjects = "ListObjects" + +// ListObjectsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjects operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListObjects for more information on using the ListObjects +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListObjectsRequest method. +// req, resp := client.ListObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { + op := &request.Operation{ + Name: opListObjects, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectsInput{} + } + + output = &ListObjectsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjects API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns some or all (up to 1,000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. A 200 OK response can contain valid or invalid XML. Be sure +// to design your application to parse the contents of the response and handle +// it appropriately. +// +// This action has been revised. We recommend that you use the newer version, +// ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), +// when developing applications. For backward compatibility, Amazon S3 continues +// to support ListObjects. +// +// The following operations are related to ListObjects: +// +// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjects for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + return out, req.Send() +} + +// ListObjectsWithContext is the same as ListObjects with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsPages iterates over the pages of a ListObjects operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjects operation. +// pageNum := 0 +// err := client.ListObjectsPages(params, +// func(page *s3.ListObjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error { + return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsPagesWithContext same as ListObjectsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjectsV2 = "ListObjectsV2" + +// ListObjectsV2Request generates a "aws/request.Request" representing the +// client's request for the ListObjectsV2 operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ListObjectsV2 for more information on using the ListObjectsV2 +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListObjectsV2Request method. +// req, resp := client.ListObjectsV2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { + op := &request.Operation{ + Name: opListObjectsV2, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?list-type=2", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"NextContinuationToken"}, + LimitToken: "MaxKeys", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListObjectsV2Input{} + } + + output = &ListObjectsV2Output{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectsV2 API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1,000) of the objects in a bucket with each request. +// You can use the request parameters as selection criteria to return a subset +// of the objects in a bucket. A 200 OK response can contain valid or invalid +// XML. Make sure to design your application to parse the contents of the response +// and handle it appropriately. For more information about listing objects, +// see Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) +// in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - To use this operation, you must +// have READ access to the bucket. You must have permission to perform the +// s3:ListBucket action. The bucket owner has this permission by default +// and can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. 
Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// Sorting order of returned objects +// +// - General purpose bucket - For general purpose buckets, ListObjectsV2 +// returns objects in lexicographical order based on their key names. +// +// - Directory bucket - For directory buckets, ListObjectsV2 does not return +// objects in lexicographical order. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// This section describes the latest revision of this action. We recommend that +// you use this revised API operation for application development. For backward +// compatibility, Amazon S3 continues to support the prior version of this API +// operation, ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). +// +// The following operations are related to ListObjectsV2: +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectsV2 for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + return out, req.Send() +} + +// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectsV2 for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See ListObjectsV2 method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectsV2 operation. +// pageNum := 0 +// err := client.ListObjectsV2Pages(params, +// func(page *s3.ListObjectsV2Output, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error { + return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsV2Input + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsV2Request(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListParts = "ListParts" + +// ListPartsRequest generates a "aws/request.Request" representing the +// client's request for the ListParts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListParts for more information on using the ListParts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListPartsRequest method. +// req, resp := client.ListPartsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { + op := &request.Operation{ + Name: opListParts, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + Paginator: &request.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPartsInput{} + } + + output = &ListPartsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListParts API operation for Amazon Simple Storage Service. +// +// Lists the parts that have been uploaded for a specific multipart upload. +// +// To use this operation, you must provide the upload ID in the request. 
You +// obtain this uploadID by sending the initiate multipart upload request through +// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// +// The ListParts request returns a maximum of 1,000 uploaded parts. The limit +// of 1,000 parts is also the default value. You can restrict the number of +// parts in a response by specifying the max-parts request parameter. If your +// multipart upload consists of more than 1,000 parts, the response returns +// an IsTruncated field with the value of true, and a NextPartNumberMarker element. +// To list remaining uploaded parts, in subsequent ListParts requests, include +// the part-number-marker query string parameter and set its value to the NextPartNumberMarker +// field value from the previous response. +// +// For more information on multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. If the upload was created using server-side +// encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer +// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you +// must have permission to the kms:Decrypt action for the ListParts request +// to succeed. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. 
+// +// The following operations are related to ListParts: +// +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListParts for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + return out, req.Send() +} + +// ListPartsWithContext is the same as ListParts with the addition of +// the ability to pass a context and additional request options. +// +// See ListParts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. +// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *s3.ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error { + return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPartsPagesWithContext same as ListPartsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPartsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPartsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" + +// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAccelerateConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. +// req, resp := client.PutBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAccelerateConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &PutBucketAccelerateConfigurationInput{} + } + + output = &PutBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer +// Acceleration is a bucket-level feature that enables you to perform faster +// data transfers to Amazon S3. +// +// To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// The Transfer Acceleration state of a bucket can be set to one of the following +// two values: +// +// - Enabled – Enables accelerated data transfers to the bucket. +// +// - Suspended – Disables accelerated data transfers to the bucket. 
+// +// The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// action returns the transfer acceleration state of a bucket. +// +// After setting the Transfer Acceleration state of a bucket to Enabled, it +// might take up to thirty minutes before the data transfer rates to the bucket +// increase. +// +// The name of the bucket used for Transfer Acceleration must be DNS-compliant +// and must not contain periods ("."). +// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// The following operations are related to PutBucketAccelerateConfiguration: +// +// - GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAccelerateConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketAcl = "PutBucketAcl" + +// PutBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAcl for more information on using the PutBucketAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketAclRequest method. 
+// req, resp := client.PutBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { + op := &request.Operation{ + Name: opPutBucketAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &PutBucketAclInput{} + } + + output = &PutBucketAclOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketAcl API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets the permissions on an existing bucket using access control lists (ACL). +// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// To set the ACL of a bucket, you must have the WRITE_ACP permission. +// +// You can use one of the following two ways to set a bucket's permissions: +// +// - Specify the ACL in the request body +// +// - Specify permissions using request headers +// +// You cannot specify access permission using both the body and the request +// headers. +// +// Depending on your application needs, you may choose to set the ACL on a bucket +// using either the request body or the headers. For example, if you have an +// existing application that updates a bucket ACL using the request body, then +// you can continue to use that approach. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// ACLs are disabled and no longer affect permissions. You must use policies +// to grant access to your bucket and the objects in it. Requests to set ACLs +// or update ACLs fail and return the AccessControlListNotSupported error code. +// Requests to read ACLs are still supported. For more information, see Controlling +// object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// You can set access permissions by using one of the following methods: +// +// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. Specify the canned ACL name +// as the value of x-amz-acl. If you use this header, you cannot use other +// access control-specific headers in your request. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// - Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using +// these headers, you specify explicit access permissions and grantees (Amazon +// Web Services accounts or Amazon S3 groups) who will receive the permission. +// If you use these ACL-specific headers, you cannot use the x-amz-acl header +// to set a canned ACL. These parameters map to the set of permissions that +// Amazon S3 supports in an ACL. 
For more information, see Access Control
+// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// You specify each grantee as a type=value pair, where the type is one of
+// the following:
+//
+// - id – if the value specified is the canonical user ID of an Amazon Web
+// Services account
+//
+// - uri – if you are granting permissions to a predefined group
+//
+// - emailAddress – if the value specified is the email address of an Amazon
+// Web Services account. Using email addresses to specify a grantee is only
+// supported in the following Amazon Web Services Regions: US East (N. Virginia),
+// US West (N. California), US West (Oregon), Asia Pacific (Singapore), Asia
+// Pacific (Sydney), Asia Pacific (Tokyo), Europe (Ireland), and South America
+// (São Paulo). For a list of all the Amazon S3 supported Regions and endpoints,
+// see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the Amazon Web Services General Reference.
+//
+// For example, the following x-amz-grant-write header grants create, overwrite,
+// and delete objects permission to the LogDelivery group predefined by Amazon
+// S3 and two Amazon Web Services accounts identified by their account IDs:
+// x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
+// id="111122223333", id="555566667777"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// # Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// - By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+// DisplayName is optional and ignored in the request.
+//
+// - By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+// - By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser. Using email addresses
+// to specify a grantee is only supported in the following Amazon Web Services
+// Regions: US East (N. Virginia), US West (N. California), US West (Oregon),
+// Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo),
+// Europe (Ireland), and South America (São Paulo). For a list of all the
+// Amazon S3 supported Regions and endpoints, see Regions and Endpoints
+// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the Amazon Web Services General Reference.
+//
+// The following operations are related to PutBucketAcl:
+//
+// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketAcl for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
+func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) {
+	req, out := c.PutBucketAclRequest(input)
+	return out, req.Send()
+}
+
+// PutBucketAclWithContext is the same as PutBucketAcl with the addition of
+// the ability to pass a context and additional request options.
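+//
+// // A minimal sketch (not part of the generated docs) of applying a canned
+// // ACL with a context; "svc", "ctx", and the bucket name are placeholders.
+// _, err := svc.PutBucketAclWithContext(ctx, &s3.PutBucketAclInput{
+// 	Bucket: aws.String("amzn-s3-demo-bucket"),
+// 	ACL:    aws.String(s3.BucketCannedACLPrivate),
+// })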
+// +// See PutBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" + +// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAnalyticsConfiguration for more information on using the PutBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. +// req, resp := client.PutBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAnalyticsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &PutBucketAnalyticsConfigurationInput{} + } + + output = &PutBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets an analytics configuration for the bucket (specified by the analytics +// configuration ID). You can have up to 1,000 analytics configurations per +// bucket. +// +// You can choose to have storage class analysis export analysis reports sent +// to a comma-separated values (CSV) flat file. See the DataExport request element. +// Reports are updated daily and are based on the object filters that you configure. +// When selecting data export, you specify a destination bucket and an optional +// destination prefix where the file is written. You can export the data to +// a destination bucket in a different account. However, the destination bucket +// must be in the same Region as the bucket that you are making the PUT analytics +// configuration to. For more information, see Amazon S3 Analytics – Storage +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). 
+// +// You must create a bucket policy on the destination bucket where the exported +// file is written to grant permissions to Amazon S3 to write objects to the +// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory +// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// PutBucketAnalyticsConfiguration has the following special errors: +// +// - HTTP Error: HTTP 400 Bad Request Code: InvalidArgument Cause: Invalid +// argument. +// +// - HTTP Error: HTTP 400 Bad Request Code: TooManyConfigurations Cause: +// You are attempting to create a new configuration but have already reached +// the 1,000-configuration limit. +// +// - HTTP Error: HTTP 403 Forbidden Code: AccessDenied Cause: You are not +// the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration +// bucket permission to set the configuration on the bucket. +// +// The following operations are related to PutBucketAnalyticsConfiguration: +// +// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketCors = "PutBucketCors" + +// PutBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketCors for more information on using the PutBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketCorsRequest method. +// req, resp := client.PutBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { + op := &request.Operation{ + Name: opPutBucketCors, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &PutBucketCorsInput{} + } + + output = &PutBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketCors API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets the cors configuration for your bucket. If the configuration exists, +// Amazon S3 replaces it. +// +// To use this operation, you must be allowed to perform the s3:PutBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// You set this configuration on a bucket so that the bucket can service cross-origin +// requests. For example, you might want to enable a request whose origin is +// http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com +// by using the browser's XMLHttpRequest capability. +// +// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// subresource to the bucket. The cors subresource is an XML document in which +// you configure rules that identify origins and the HTTP methods that can be +// executed on your bucket. The document is limited to 64 KB in size. +// +// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) +// against a bucket, it evaluates the cors configuration on the bucket and uses +// the first CORSRule rule that matches the incoming browser request to enable +// a cross-origin request. For a rule to match, the following conditions must +// be met: +// +// - The request's Origin header must match AllowedOrigin elements. 
+// +// - The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method +// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod +// elements. +// +// - Every header specified in the Access-Control-Request-Headers request +// header of a pre-flight request must match an AllowedHeader element. +// +// For more information about CORS, go to Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// S3 User Guide. +// +// The following operations are related to PutBucketCors: +// +// - GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) +// +// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// +// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + return out, req.Send() +} + +// PutBucketCorsWithContext is the same as PutBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketEncryption = "PutBucketEncryption" + +// PutBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketEncryption for more information on using the PutBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketEncryptionRequest method. 
+// req, resp := client.PutBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) { + op := &request.Operation{ + Name: opPutBucketEncryption, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &PutBucketEncryptionInput{} + } + + output = &PutBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketEncryption API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// This action uses the encryption subresource to configure default encryption +// and Amazon S3 Bucket Keys for an existing bucket. +// +// By default, all buckets have a default encryption configuration that uses +// server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally +// configure default encryption for a bucket by using server-side encryption +// with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side +// encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default +// encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html). If you +// use PutBucketEncryption to set your default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 +// does not validate the KMS key ID provided in PutBucketEncryption requests. +// +// This action requires Amazon Web Services Signature Version 4. For more information, +// see Authenticating Requests (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). +// +// To use this operation, you must have permission to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// The following operations are related to PutBucketEncryption: +// +// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// +// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketEncryption for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + return out, req.Send() +} + +// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketIntelligentTieringConfiguration = "PutBucketIntelligentTieringConfiguration" + +// PutBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketIntelligentTieringConfiguration for more information on using the PutBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketIntelligentTieringConfigurationRequest method. +// req, resp := client.PutBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration +func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketIntelligentTieringConfigurationInput) (req *request.Request, output *PutBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketIntelligentTieringConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &PutBucketIntelligentTieringConfigurationInput{} + } + + output = &PutBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Puts a S3 Intelligent-Tiering configuration to the specified bucket. You +// can have up to 1,000 S3 Intelligent-Tiering configurations per bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without performance impact or operational overhead. 
S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput +// access tiers. To get the lowest storage cost on data that can be accessed +// in minutes to hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not monitored and not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the Frequent Access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to PutBucketIntelligentTieringConfiguration include: +// +// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically +// move objects stored in the S3 Intelligent-Tiering storage class to the Archive +// Access or Deep Archive Access tier. +// +// PutBucketIntelligentTieringConfiguration has the following special errors: +// +// # HTTP 400 Bad Request Error +// +// Code: InvalidArgument +// +// Cause: Invalid Argument +// +// # HTTP 400 Bad Request Error +// +// Code: TooManyConfigurations +// +// Cause: You are attempting to create a new configuration but have already +// reached the 1,000-configuration limit. +// +// # HTTP 403 Forbidden Error +// +// Cause: You are not the owner of the specified bucket, or you do not have +// the s3:PutIntelligentTieringConfiguration bucket permission to set the configuration +// on the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketIntelligentTieringConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration +func (c *S3) PutBucketIntelligentTieringConfiguration(input *PutBucketIntelligentTieringConfigurationInput) (*PutBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketIntelligentTieringConfigurationWithContext is the same as PutBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *PutBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*PutBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" + +// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketInventoryConfigurationRequest method. +// req, resp := client.PutBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketInventoryConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &PutBucketInventoryConfigurationInput{} + } + + output = &PutBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// This implementation of the PUT action adds an inventory configuration (identified +// by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations +// per bucket. +// +// Amazon S3 inventory generates inventories of the objects in the bucket on +// a daily or weekly basis, and the results are published to a flat file. The +// bucket that is inventoried is called the source bucket, and the bucket where +// the inventory flat file is stored is called the destination bucket. The destination +// bucket must be in the same Amazon Web Services Region as the source bucket. +// +// When you configure an inventory for a source bucket, you specify the destination +// bucket where you want the inventory to be stored, and whether to generate +// the inventory daily or weekly. You can also configure what object metadata +// to include and whether to inventory all object versions or only current versions. +// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// in the Amazon S3 User Guide. 
+// +// You must create a bucket policy on the destination bucket to grant permissions +// to Amazon S3 to write objects to the bucket in the defined location. For +// an example policy, see Granting Permissions for Amazon S3 Inventory and Storage +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// +// # Permissions +// +// To use this operation, you must have permission to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. +// +// The s3:PutInventoryConfiguration permission allows a user to create an S3 +// Inventory (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html) +// report that includes all object metadata fields available and to specify +// the destination bucket to store the inventory. A user with read access to +// objects in the destination bucket can also access all object metadata fields +// that are available in the inventory report. +// +// To restrict access to an inventory report, see Restricting access to an Amazon +// S3 Inventory report (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10) +// in the Amazon S3 User Guide. For more information about the metadata fields +// available in S3 Inventory, see Amazon S3 Inventory lists (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents) +// in the Amazon S3 User Guide. For more information about permissions, see +// Permissions related to bucket subresource operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Identity and access management in Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// PutBucketInventoryConfiguration has the following special errors: +// +// # HTTP 400 Bad Request Error +// +// Code: InvalidArgument +// +// Cause: Invalid Argument +// +// # HTTP 400 Bad Request Error +// +// Code: TooManyConfigurations +// +// Cause: You are attempting to create a new configuration but have already +// reached the 1,000-configuration limit. +// +// # HTTP 403 Forbidden Error +// +// Cause: You are not the owner of the specified bucket, or you do not have +// the s3:PutInventoryConfiguration bucket permission to set the configuration +// on the bucket. +// +// The following operations are related to PutBucketInventoryConfiguration: +// +// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketInventoryConfiguration for usage and error information. 
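+//
+// For illustration, the runtime type assertion described above might look
+// like the following sketch (svc and input are assumed to be an existing
+// client and a prepared request input):
+//
+//	if _, err := svc.PutBucketInventoryConfiguration(input); err != nil {
+//	    if aerr, ok := err.(awserr.Error); ok {
+//	        switch aerr.Code() {
+//	        case "TooManyConfigurations": // one of the special errors listed above
+//	            // back off: the bucket already has 1,000 configurations
+//	        default:
+//	            fmt.Println(aerr.Code(), aerr.Message())
+//	        }
+//	    }
+//	}
+//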
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycle = "PutBucketLifecycle" + +// PutBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLifecycle for more information on using the PutBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketLifecycleRequest method. +// req, resp := client.PutBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// +// Deprecated: PutBucketLifecycle has been deprecated +func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketLifecycle, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleInput{} + } + + output = &PutBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketLifecycle API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// For an updated version of this API, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html). +// This version has been deprecated. Existing lifecycle configurations will +// work. 
For new lifecycle configurations, use the updated API.
+//
+// Creates a new lifecycle configuration for the bucket or replaces an existing
+// lifecycle configuration. For information about lifecycle configuration, see
+// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// in the Amazon S3 User Guide.
+//
+// By default, all Amazon S3 resources, including buckets, objects, and related
+// subresources (for example, lifecycle configuration and website configuration)
+// are private. Only the resource owner, the Amazon Web Services account that
+// created the resource, can access it. The resource owner can optionally grant
+// access permissions to others by writing an access policy. For this operation,
+// users must get the s3:PutLifecycleConfiguration permission.
+//
+// You can also explicitly deny permissions. Explicit denial also supersedes
+// any other permissions. If you want to prevent users or accounts from removing
+// or deleting objects from your bucket, you must deny them permissions for
+// the following actions:
+//
+//   - s3:DeleteObject
+//
+//   - s3:DeleteObjectVersion
+//
+//   - s3:PutLifecycleConfiguration
+//
+// For more information about permissions, see Managing Access Permissions to
+// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide.
+//
+// For more examples of transitioning objects to storage classes such as STANDARD_IA
+// or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples).
+//
+// The following operations are related to PutBucketLifecycle:
+//
+//   - GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) (Deprecated)
+//
+//   - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+//
+//   - RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html)
+//
+// By default, a resource owner—in this case, a bucket owner, which is the
+// Amazon Web Services account that created the bucket—can perform any of
+// the operations. A resource owner can also grant others permission to perform
+// the operation. For more information, see the following topics in the Amazon
+// S3 User Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
+// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLifecycle for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
+//
+// Deprecated: PutBucketLifecycle has been deprecated
+func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
+	req, out := c.PutBucketLifecycleRequest(input)
+	return out, req.Send()
+}
+
+// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
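+//
+// A minimal sketch of passing a deadline (the one-minute timeout is an
+// arbitrary placeholder; any context.Context satisfies aws.Context, and svc
+// and input stand for an existing client and request input):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+//	defer cancel()
+//	_, err := svc.PutBucketLifecycleWithContext(ctx, input)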
+// +// See PutBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: PutBucketLifecycleWithContext has been deprecated +func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" + +// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycleConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. +// req, resp := client.PutBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycleConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleConfigurationInput{} + } + + output = &PutBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. Keep in mind that this will overwrite an existing +// lifecycle configuration, so if you want to retain any configuration details, +// they must be included in the new lifecycle configuration. For information +// about lifecycle configuration, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html). +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, object size, or any combination +// of these. Accordingly, this section describes the latest API. 
The previous +// version of the API supported filtering based only on an object key name prefix, +// which is supported for backward compatibility. For the related API description, +// see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html). +// +// # Rules +// +// You specify the lifecycle configuration in your request body. The lifecycle +// configuration is specified as XML consisting of one or more rules. An Amazon +// S3 Lifecycle configuration can have up to 1,000 rules. This limit is not +// adjustable. Each rule consists of the following: +// +// - A filter identifying a subset of objects to which the rule applies. +// The filter can be based on a key name prefix, object tags, object size, +// or any combination of these. +// +// - A status indicating whether the rule is in effect. +// +// - One or more lifecycle transition and expiration actions that you want +// Amazon S3 to perform on the objects identified by the filter. If the state +// of your bucket is versioning-enabled or versioning-suspended, you can +// have many versions of the same object (one current version and zero or +// more noncurrent versions). Amazon S3 provides predefined actions that +// you can specify for current and noncurrent object versions. +// +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html). +// +// # Permissions +// +// By default, all Amazon S3 resources are private, including buckets, objects, +// and related subresources (for example, lifecycle configuration and website +// configuration). Only the resource owner (that is, the Amazon Web Services +// account that created it) can access the resource. The resource owner can +// optionally grant access permissions to others by writing an access policy. +// For this operation, a user must get the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. An explicit deny also supersedes +// any other permissions. If you want to block users or accounts from removing +// or deleting objects from your bucket, you must deny them permissions for +// the following actions: +// +// - s3:DeleteObject +// +// - s3:DeleteObjectVersion +// +// - s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// The following operations are related to PutBucketLifecycleConfiguration: +// +// - Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) +// +// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycleConfiguration for usage and error information. 
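+//
+// As a sketch of the request shape (the bucket name, rule ID, prefix, and
+// expiration period are placeholders), a single rule that expires objects
+// under a key name prefix might look like this:
+//
+//	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
+//	    Bucket: aws.String("example-bucket"), // placeholder
+//	    LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+//	        Rules: []*s3.LifecycleRule{{
+//	            ID:         aws.String("expire-old-logs"), // placeholder
+//	            Status:     aws.String(s3.ExpirationStatusEnabled),
+//	            Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")}, // placeholder prefix
+//	            Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
+//	        }},
+//	    },
+//	})
+//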
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
+func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
+	req, out := c.PutBucketLifecycleConfigurationRequest(input)
+	return out, req.Send()
+}
+
+// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLifecycleConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) {
+	req, out := c.PutBucketLifecycleConfigurationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opPutBucketLogging = "PutBucketLogging"
+
+// PutBucketLoggingRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketLogging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketLogging for more information on using the PutBucketLogging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//	// Example sending a request using the PutBucketLoggingRequest method.
+//	req, resp := client.PutBucketLoggingRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) {
+	op := &request.Operation{
+		Name:       opPutBucketLogging,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?logging",
+	}
+
+	if input == nil {
+		input = &PutBucketLoggingInput{}
+	}
+
+	output = &PutBucketLoggingOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Build.PushBackNamed(request.NamedHandler{
+		Name: "contentMd5Handler",
+		Fn:   checksum.AddBodyContentMD5Handler,
+	})
+	return
+}
+
+// PutBucketLogging API operation for Amazon Simple Storage Service.
+//
+// This operation is not supported by directory buckets.
+//
+// Set the logging parameters for a bucket and specify permissions for who
+// can view and modify the logging parameters. All logs are saved to buckets
+// in the same Amazon Web Services Region as the source bucket. To set the logging
+// status of a bucket, you must be the bucket owner.
+//
+// The bucket owner is automatically granted FULL_CONTROL to all logs. You use
+// the Grantee request element to grant access to other people.
The Permissions
+// request element specifies the kind of access the grantee has to the logs.
+//
+// If the target bucket for log delivery uses the bucket owner enforced setting
+// for S3 Object Ownership, you can't use the Grantee request element to grant
+// access to others. Permissions can only be granted using policies. For more
+// information, see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general)
+// in the Amazon S3 User Guide.
+//
+// # Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (by using request elements) in the following ways:
+//
+//   - By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+//     xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+//     DisplayName is optional and ignored in the request.
+//
+//   - By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+//     xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+//     The grantee is resolved to the CanonicalUser and, in a response to a GETObjectAcl
+//     request, appears as the CanonicalUser.
+//
+//   - By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+//     xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+// To enable logging, you use LoggingEnabled and its children request elements.
+// To disable logging, you use an empty BucketLoggingStatus request element:
+//
+//	<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
+//
+// For more information about server access logging, see Server Access Logging
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html) in
+// the Amazon S3 User Guide.
+//
+// For more information about creating a bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html).
+// For more information about returning the logging status of a bucket, see
+// GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html).
+//
+// The following operations are related to PutBucketLogging:
+//
+//   - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+//   - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+//   - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+//   - GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLogging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
+	req, out := c.PutBucketLoggingRequest(input)
+	return out, req.Send()
+}
+
+// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLogging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" + +// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketMetricsConfigurationRequest method. +// req, resp := client.PutBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketMetricsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &PutBucketMetricsConfigurationInput{} + } + + output = &PutBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets a metrics configuration (specified by the metrics configuration ID) +// for the bucket. You can have up to 1,000 metrics configurations per bucket. +// If you're updating an existing metrics configuration, note that this is a +// full replacement of the existing metrics configuration. If you don't include +// the elements you want to keep, they are erased. +// +// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). 
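+//
+// A minimal sketch of the request (the bucket name and configuration ID are
+// placeholders; omitting Filter applies the configuration to all objects in
+// the bucket):
+//
+//	_, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
+//	    Bucket: aws.String("example-bucket"),      // placeholder
+//	    Id:     aws.String("EntireBucketMetrics"), // placeholder configuration ID
+//	    MetricsConfiguration: &s3.MetricsConfiguration{
+//	        Id: aws.String("EntireBucketMetrics"), // must match the Id above
+//	    },
+//	})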
+// +// The following operations are related to PutBucketMetricsConfiguration: +// +// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// +// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// +// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// PutBucketMetricsConfiguration has the following special error: +// +// - Error code: TooManyConfigurations Description: You are attempting to +// create a new configuration but have already reached the 1,000-configuration +// limit. HTTP Status Code: HTTP 400 Bad Request +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketNotification = "PutBucketNotification" + +// PutBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketNotification for more information on using the PutBucketNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketNotificationRequest method. 
+// req, resp := client.PutBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated +func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketNotification, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationInput{} + } + + output = &PutBucketNotificationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketNotification API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// No longer used, see the PutBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotificationConfiguration.html) +// operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotification for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated +func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: PutBucketNotificationWithContext has been deprecated +func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" + +// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotificationConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+//
+// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//	// Example sending a request using the PutBucketNotificationConfigurationRequest method.
+//	req, resp := client.PutBucketNotificationConfigurationRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
+func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) {
+	op := &request.Operation{
+		Name:       opPutBucketNotificationConfiguration,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?notification",
+	}
+
+	if input == nil {
+		input = &PutBucketNotificationConfigurationInput{}
+	}
+
+	output = &PutBucketNotificationConfigurationOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
+//
+// This operation is not supported by directory buckets.
+//
+// Enables notifications of specified events for a bucket. For more information
+// about event notifications, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+//
+// Using this API, you can replace an existing notification configuration. The
+// configuration is an XML file that defines the event types that you want Amazon
+// S3 to publish and the destination where you want Amazon S3 to publish an
+// event notification when it detects an event of the specified type.
+//
+// By default, your bucket has no event notifications configured. That is, the
+// notification configuration will be an empty NotificationConfiguration.
+//
+//	<NotificationConfiguration>
+//
+//	</NotificationConfiguration>
+//
+// This action replaces the existing notification configuration with the configuration
+// you include in the request body.
+//
+// After Amazon S3 receives this request, it first verifies that any Amazon
+// Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon
+// SQS) destination exists, and that the bucket owner has permission to publish
+// to it by sending a test notification. In the case of Lambda destinations,
+// Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission
+// to invoke the function from the Amazon S3 bucket. For more information, see
+// Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+//
+// You can disable notifications by adding the empty NotificationConfiguration
+// element.
+//
+// For more information about the number of event notification configurations
+// that you can create per bucket, see Amazon S3 service quotas (https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3)
+// in Amazon Web Services General Reference.
+//
+// By default, only the bucket owner can configure notifications on a bucket.
+// However, bucket owners can use a bucket policy to grant permission to other
+// users to set this configuration with the required s3:PutBucketNotification
+// permission.
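+//
+// As a rough sketch (the bucket name and queue ARN are placeholders),
+// publishing object-created events to an SQS queue might look like this:
+//
+//	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+//	    Bucket: aws.String("example-bucket"), // placeholder
+//	    NotificationConfiguration: &s3.NotificationConfiguration{
+//	        QueueConfigurations: []*s3.QueueConfiguration{{
+//	            QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"), // placeholder ARN
+//	            Events:   []*string{aws.String(s3.EventS3ObjectCreated)},
+//	        }},
+//	    },
+//	})
+//
+// Passing an empty &s3.NotificationConfiguration{} instead removes all
+// notification configurations from the bucket, as described above.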
+// +// The PUT notification is an atomic operation. For example, suppose your notification +// configuration includes SNS topic, SQS queue, and Lambda function configurations. +// When you send a PUT request with this configuration, Amazon S3 sends test +// messages to your SNS topic. If the message fails, the entire PUT action will +// fail, and Amazon S3 will not add the configuration to your bucket. +// +// If the configuration in the request body includes only one TopicConfiguration +// specifying only the s3:ReducedRedundancyLostObject event type, the response +// will also include the x-amz-sns-test-message-id header containing the message +// ID of the test notification sent to the topic. +// +// The following action is related to PutBucketNotificationConfiguration: +// +// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotificationConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketOwnershipControls = "PutBucketOwnershipControls" + +// PutBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketOwnershipControls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketOwnershipControls for more information on using the PutBucketOwnershipControls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketOwnershipControlsRequest method. 
+// req, resp := client.PutBucketOwnershipControlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls +func (c *S3) PutBucketOwnershipControlsRequest(input *PutBucketOwnershipControlsInput) (req *request.Request, output *PutBucketOwnershipControlsOutput) { + op := &request.Operation{ + Name: opPutBucketOwnershipControls, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?ownershipControls", + } + + if input == nil { + input = &PutBucketOwnershipControlsInput{} + } + + output = &PutBucketOwnershipControlsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketOwnershipControls API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this +// operation, you must have the s3:PutBucketOwnershipControls permission. For +// more information about Amazon S3 permissions, see Specifying permissions +// in a policy (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html). +// +// For information about Amazon S3 Object Ownership, see Using object ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html). +// +// The following operations are related to PutBucketOwnershipControls: +// +// - GetBucketOwnershipControls +// +// - DeleteBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketOwnershipControls for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls +func (c *S3) PutBucketOwnershipControls(input *PutBucketOwnershipControlsInput) (*PutBucketOwnershipControlsOutput, error) { + req, out := c.PutBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// PutBucketOwnershipControlsWithContext is the same as PutBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketOwnershipControlsWithContext(ctx aws.Context, input *PutBucketOwnershipControlsInput, opts ...request.Option) (*PutBucketOwnershipControlsOutput, error) { + req, out := c.PutBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketPolicy = "PutBucketPolicy" + +// PutBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketPolicy operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketPolicy for more information on using the PutBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketPolicyRequest method. +// req, resp := client.PutBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { + op := &request.Operation{ + Name: opPutBucketPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &PutBucketPolicyInput{} + } + + output = &PutBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketPolicy API operation for Amazon Simple Storage Service. +// +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// If you are using an identity other than the root user of the Amazon Web Services +// account that owns the bucket, the calling identity must both have the PutBucketPolicy +// permissions on the specified bucket and belong to the bucket owner's account +// in order to use this operation. +// +// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy +// API actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing +// these API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// +// - General purpose bucket permissions - The s3:PutBucketPolicy permission +// is required in a policy. 
For more information about bucket policies
+// for general purpose buckets, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html)
+// in the Amazon S3 User Guide.
+//
+//   - Directory bucket permissions - To grant access to this API operation,
+//     you must have the s3express:PutBucketPolicy permission in an IAM identity-based
+//     policy instead of a bucket policy. Cross-account access to this API operation
+//     isn't supported. This operation can only be performed by the Amazon Web
+//     Services account that owns the resource. For more information about directory
+//     bucket policies and permissions, see Amazon Web Services Identity and
+//     Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
+//     in the Amazon S3 User Guide.
+//
+// # Example bucket policies
+//
+// General purpose buckets example bucket policies - See Bucket policy examples
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html)
+// in the Amazon S3 User Guide.
+//
+// Directory bucket example bucket policies - See Example bucket policies for
+// S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
+// in the Amazon S3 User Guide.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.
+//
+// The following operations are related to PutBucketPolicy:
+//
+//   - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+//   - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketPolicy for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy
+func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) {
+	req, out := c.PutBucketPolicyRequest(input)
+	return out, req.Send()
+}
+
+// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) {
+	req, out := c.PutBucketPolicyRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opPutBucketReplication = "PutBucketReplication"
+
+// PutBucketReplicationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketReplication operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketReplication for more information on using the PutBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketReplicationRequest method. +// req, resp := client.PutBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { + op := &request.Operation{ + Name: opPutBucketReplication, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &PutBucketReplicationInput{} + } + + output = &PutBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketReplication API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Creates a replication configuration or replaces an existing one. For more +// information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon S3 User Guide. +// +// Specify the replication configuration in the request body. In the replication +// configuration, you provide the name of the destination bucket or buckets +// where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 +// can assume to replicate objects on your behalf, and other relevant information. +// You can invoke this request for a specific Amazon Web Services Region by +// using the aws:RequestedRegion (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion) +// condition key. +// +// A replication configuration must include at least one rule, and can contain +// a maximum of 1,000. Each rule identifies a subset of objects to replicate +// by filtering the objects in the source bucket. To choose additional subsets +// of objects to replicate, add a rule for each subset. +// +// To specify a subset of the objects in the source bucket to apply a replication +// rule to, add the Filter element as a child of the Rule element. You can filter +// objects based on an object key prefix, one or more object tags, or both. +// When you add the Filter element in the configuration, you must also add the +// following elements: DeleteMarkerReplication, Status, and Priority. +// +// If you are using an earlier version of the replication configuration, Amazon +// S3 handles replication of delete markers differently. For more information, +// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). +// +// For information about enabling versioning on a bucket, see Using Versioning +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). 
+// +// # Handling Replication of Encrypted Objects +// +// By default, Amazon S3 doesn't replicate objects that are stored at rest using +// server-side encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted +// objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, +// Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about +// replication configuration, see Replicating Objects Created with SSE Using +// KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). +// +// For information on PutBucketReplication errors, see List of replication-related +// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// +// # Permissions +// +// To create a PutBucketReplication request, you must have s3:PutReplicationConfiguration +// permissions for the bucket. +// +// By default, a resource owner, in this case the Amazon Web Services account +// that created the bucket, can perform this operation. The resource owner can +// also grant others permissions to perform the operation. For more information +// about permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// To perform this operation, the user or role performing the action must have +// the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// permission. +// +// The following operations are related to PutBucketReplication: +// +// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// +// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketReplication for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + return out, req.Send() +} + +// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opPutBucketRequestPayment = "PutBucketRequestPayment" + +// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketRequestPayment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketRequestPaymentRequest method. +// req, resp := client.PutBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opPutBucketRequestPayment, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &PutBucketRequestPaymentInput{} + } + + output = &PutBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. For more information, see Requester Pays +// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to PutBucketRequestPayment: +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// - GetBucketRequestPayment (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketRequestPayment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketRequestPayment for details on how to use this API operation. 
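+//
+// For illustration, a minimal Requester Pays configuration might be put like
+// this (a sketch only; the bucket name is hypothetical and the aws, fmt, s3,
+// and session packages are assumed to be imported):
+//
+// svc := s3.New(session.Must(session.NewSession()))
+// _, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
+//     Bucket: aws.String("example-bucket"),
+//     RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
+//         Payer: aws.String(s3.PayerRequester),
+//     },
+// })
+// if err != nil {
+//     fmt.Println(err) // handle the error
+// }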
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketTagging = "PutBucketTagging" + +// PutBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketTagging for more information on using the PutBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketTaggingRequest method. +// req, resp := client.PutBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { + op := &request.Operation{ + Name: opPutBucketTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + output = &PutBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketTagging API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets the tags for a bucket. +// +// Use tags to organize your Amazon Web Services bill to reflect your own cost +// structure. To do this, sign up to get your Amazon Web Services account bill +// with tag key values included. Then, to see the cost of combined resources, +// organize your billing information according to resources with the same tag +// key values. For example, you can tag several resources with a specific application +// name, and then organize your billing information to see the total cost of +// that application across several services. For more information, see Cost +// Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) +// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html). +// +// When this operation sets the tags for a bucket, it will overwrite any current +// tags the bucket already has. You cannot use this operation to add tags to +// an existing list of tags. +// +// To use this operation, you must have permissions to perform the s3:PutBucketTagging +// action. 
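+//
+// As a sketch (names are hypothetical; the aws, fmt, s3, and session packages
+// are assumed to be imported), a complete tag-set could be applied like this:
+//
+// svc := s3.New(session.Must(session.NewSession()))
+// _, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
+//     Bucket: aws.String("example-bucket"),
+//     Tagging: &s3.Tagging{TagSet: []*s3.Tag{
+//         {Key: aws.String("CostCenter"), Value: aws.String("12345")},
+//     }},
+// })
+// if err != nil {
+//     fmt.Println(err) // handle the error
+// }
+//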
The bucket owner has this permission by default and can grant this
+// permission to others. For more information about permissions, see Permissions
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+//
+// PutBucketTagging has the following special errors. For more Amazon S3 errors,
+// see Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html).
+//
+// - InvalidTag - The tag provided was not a valid tag. This error can occur
+// if the tag did not pass input validation. For more information, see Using
+// Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html).
+//
+// - MalformedXML - The XML provided does not match the schema.
+//
+// - OperationAborted - A conflicting conditional action is currently in
+// progress against this resource. Please try again.
+//
+// - InternalError - The service was unable to apply the provided tag to
+// the bucket.
+//
+// The following operations are related to PutBucketTagging:
+//
+// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html)
+//
+// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging
+func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) {
+ req, out := c.PutBucketTaggingRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) {
+ req, out := c.PutBucketTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutBucketVersioning = "PutBucketVersioning"
+
+// PutBucketVersioningRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketVersioning operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketVersioning for more information on using the PutBucketVersioning
+// API call, and error handling.
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketVersioningRequest method. +// req, resp := client.PutBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { + op := &request.Operation{ + Name: opPutBucketVersioning, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &PutBucketVersioningInput{} + } + + output = &PutBucketVersioningOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketVersioning API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets the versioning state of an existing bucket. +// +// You can set the versioning state with one of the following values: +// +// Enabled—Enables versioning for the objects in the bucket. All objects added +// to the bucket receive a unique version ID. +// +// Suspended—Disables versioning for the objects in the bucket. All objects +// added to the bucket receive the version ID null. +// +// If the versioning state has never been set on a bucket, it has no versioning +// state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// request does not return a versioning state value. +// +// In order to enable MFA Delete, you must be the bucket owner. If you are the +// bucket owner and want to enable MFA Delete in the bucket versioning configuration, +// you must include the x-amz-mfa request header and the Status and the MfaDelete +// request elements in a request to set the versioning state of the bucket. +// +// If you have an object expiration lifecycle configuration in your non-versioned +// bucket and you want to maintain the same permanent delete behavior when you +// enable versioning, you must add a noncurrent expiration policy. The noncurrent +// expiration lifecycle configuration will manage the deletes of the noncurrent +// object versions in the version-enabled bucket. (A version-enabled bucket +// maintains one current and zero or more noncurrent object versions.) For more +// information, see Lifecycle and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). +// +// The following operations are related to PutBucketVersioning: +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// - GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
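+//
+// For illustration, enabling versioning might look like the following sketch
+// (the bucket name is hypothetical; the aws, fmt, s3, and session packages
+// are assumed to be imported):
+//
+// svc := s3.New(session.Must(session.NewSession()))
+// _, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
+//     Bucket: aws.String("example-bucket"),
+//     VersioningConfiguration: &s3.VersioningConfiguration{
+//         Status: aws.String(s3.BucketVersioningStatusEnabled),
+//     },
+// })
+// if err != nil {
+//     fmt.Println(err) // handle the error
+// }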
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketVersioning for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + return out, req.Send() +} + +// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketWebsite = "PutBucketWebsite" + +// PutBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketWebsite for more information on using the PutBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketWebsiteRequest method. +// req, resp := client.PutBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { + op := &request.Operation{ + Name: opPutBucketWebsite, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &PutBucketWebsiteInput{} + } + + output = &PutBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketWebsite API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets the configuration of the website that is specified in the website subresource. +// To configure a bucket as a website, you can add this subresource on the bucket +// with website configuration information such as the file name of the index +// document and any redirect rules. For more information, see Hosting Websites +// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// This PUT action requires the S3:PutBucketWebsite permission. 
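+//
+// A minimal index/error-document configuration, as an illustrative sketch
+// (names are hypothetical; the aws, fmt, s3, and session packages are assumed
+// to be imported):
+//
+// svc := s3.New(session.Must(session.NewSession()))
+// _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
+//     Bucket: aws.String("example-bucket"),
+//     WebsiteConfiguration: &s3.WebsiteConfiguration{
+//         IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
+//         ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
+//     },
+// })
+// if err != nil {
+//     fmt.Println(err) // handle the error
+// }
+//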
By default,
+// only the bucket owner can configure the website attached to a bucket; however,
+// bucket owners can allow other users to set the website configuration by writing
+// a bucket policy that grants them the S3:PutBucketWebsite permission.
+//
+// To redirect all website requests sent to the bucket's website endpoint, you
+// add a website configuration with the following elements. Because all requests
+// are sent to another website, you don't need to provide an index document name
+// for the bucket.
+//
+// - WebsiteConfiguration
+//
+// - RedirectAllRequestsTo
+//
+// - HostName
+//
+// - Protocol
+//
+// If you want granular control over redirects, you can use the following elements
+// to add routing rules that describe conditions for redirecting requests and
+// information about the redirect destination. In this case, the website configuration
+// must provide an index document for the bucket, because some requests might
+// not be redirected.
+//
+// - WebsiteConfiguration
+//
+// - IndexDocument
+//
+// - Suffix
+//
+// - ErrorDocument
+//
+// - Key
+//
+// - RoutingRules
+//
+// - RoutingRule
+//
+// - Condition
+//
+// - HttpErrorCodeReturnedEquals
+//
+// - KeyPrefixEquals
+//
+// - Redirect
+//
+// - Protocol
+//
+// - HostName
+//
+// - ReplaceKeyPrefixWith
+//
+// - ReplaceKeyWith
+//
+// - HttpRedirectCode
+//
+// Amazon S3 has a limitation of 50 routing rules per website configuration.
+// If you require more than 50 routing rules, you can use object redirect. For
+// more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html)
+// in the Amazon S3 User Guide.
+//
+// The maximum request length is limited to 128 KB.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketWebsite for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
+func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) {
+ req, out := c.PutBucketWebsiteRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) {
+ req, out := c.PutBucketWebsiteRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutObject = "PutObject"
+
+// PutObjectRequest generates a "aws/request.Request" representing the
+// client's request for the PutObject operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+// +// See PutObject for more information on using the PutObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutObjectRequest method. +// req, resp := client.PutObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { + op := &request.Operation{ + Name: opPutObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &PutObjectInput{} + } + + output = &PutObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObject API operation for Amazon Simple Storage Service. +// +// Adds an object to a bucket. +// +// - Amazon S3 never adds partial objects; if you receive a success response, +// Amazon S3 added the entire object to the bucket. You cannot use PutObject +// to only update a single piece of metadata for an existing object. You +// must put the entire object with updated metadata if you want to update +// some values. +// +// - If your bucket uses the bucket owner enforced setting for Object Ownership, +// ACLs are disabled and no longer affect permissions. All objects written +// to the bucket by any account will be owned by the bucket owner. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Amazon S3 is a distributed system. If it receives multiple write requests +// for the same object simultaneously, it overwrites all but the last object +// written. However, Amazon S3 provides features that can modify this behavior: +// +// - S3 Object Lock - To prevent objects from being deleted or overwritten, +// you can use Amazon S3 Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) +// in the Amazon S3 User Guide. This functionality is not supported for directory +// buckets. +// +// - S3 Versioning - When you enable versioning for a bucket, if Amazon S3 +// receives multiple write requests for the same object simultaneously, it +// stores all versions of the objects. For each write request that is made +// to the same object, Amazon S3 automatically generates a unique version +// ID of that object being stored in Amazon S3. You can retrieve, replace, +// or delete any version of the object. For more information about versioning, +// see Adding Objects to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) +// in the Amazon S3 User Guide. For information about returning the versioning +// state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). +// This functionality is not supported for directory buckets. 
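+//
+// As a sketch of the simplest upload case (the bucket and key are hypothetical;
+// the aws, bytes, fmt, s3, and session packages are assumed to be imported):
+//
+// svc := s3.New(session.Must(session.NewSession()))
+// _, err := svc.PutObject(&s3.PutObjectInput{
+//     Bucket: aws.String("example-bucket"),
+//     Key:    aws.String("example-key"),
+//     Body:   bytes.NewReader([]byte("hello world")),
+// })
+// if err != nil {
+//     fmt.Println(err) // handle the error
+// }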
+//
+// Permissions
+//
+// - General purpose bucket permissions - The following permissions are required
+// in your policies when your PutObject request includes specific headers.
+// s3:PutObject - To successfully complete the PutObject request, you must
+// always have the s3:PutObject permission on a bucket to add an object to
+// it. s3:PutObjectAcl - To successfully change the object's ACL with your PutObject
+// request, you must have the s3:PutObjectAcl permission. s3:PutObjectTagging -
+// To successfully set the tag-set with your PutObject request, you must have
+// the s3:PutObjectTagging permission.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// Data integrity with Content-MD5
+//
+// - General purpose bucket - To ensure that data is not corrupted traversing
+// the network, use the Content-MD5 header. When you use this header, Amazon
+// S3 checks the object against the provided MD5 value and, if they do not
+// match, Amazon S3 returns an error. Alternatively, when the object's ETag
+// is its MD5 digest, you can calculate the MD5 while putting the object
+// to Amazon S3 and compare the returned ETag to the calculated MD5 value.
+//
+// - Directory bucket - This functionality is not supported for directory
+// buckets.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
+//
+// For more information about related Amazon S3 APIs, see the following:
+//
+// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+//
+// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObject for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
+func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
+ req, out := c.PutObjectRequest(input)
+ return out, req.Send()
+}
+
+// PutObjectWithContext is the same as PutObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur.
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectAcl = "PutObjectAcl" + +// PutObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectAcl for more information on using the PutObjectAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutObjectAclRequest method. +// req, resp := client.PutObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { + op := &request.Operation{ + Name: opPutObjectAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &PutObjectAclInput{} + } + + output = &PutObjectAclOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectAcl API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Uses the acl subresource to set the access control list (ACL) permissions +// for a new or existing object in an S3 bucket. You must have the WRITE_ACP +// permission to set the ACL of an object. For more information, see What permissions +// can I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) +// in the Amazon S3 User Guide. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// Depending on your application needs, you can choose to set the ACL on an +// object using either the request body or the headers. For example, if you +// have an existing application that updates a bucket ACL using the request +// body, you can continue to use that approach. For more information, see Access +// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// in the Amazon S3 User Guide. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// ACLs are disabled and no longer affect permissions. You must use policies +// to grant access to your bucket and the objects in it. Requests to set ACLs +// or update ACLs fail and return the AccessControlListNotSupported error code. +// Requests to read ACLs are still supported. For more information, see Controlling +// object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. 
+//
+// # Permissions
+//
+// You can set access permissions using one of the following methods:
+//
+// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports
+// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a
+// predefined set of grantees and permissions. Specify the canned ACL name
+// as the value of x-amz-acl. If you use this header, you cannot use other
+// access control-specific headers in your request. For more information,
+// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// - Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
+// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using
+// these headers, you specify explicit access permissions and grantees (Amazon
+// Web Services accounts or Amazon S3 groups) who will receive the permission.
+// If you use these ACL-specific headers, you cannot use x-amz-acl header
+// to set a canned ACL. These parameters map to the set of permissions that
+// Amazon S3 supports in an ACL. For more information, see Access Control
+// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// You specify each grantee as a type=value pair, where the type is one of
+// the following: id – if the value specified is the canonical user ID
+// of an Amazon Web Services account uri – if you are granting permissions
+// to a predefined group emailAddress – if the value specified is the email
+// address of an Amazon Web Services account Using email addresses to specify
+// a grantee is only supported in the following Amazon Web Services Regions:
+// US East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific
+// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland)
+// South America (São Paulo) For a list of all the Amazon S3 supported Regions
+// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the Amazon Web Services General Reference. For example, the following
+// x-amz-grant-read header grants list objects permission to the two Amazon
+// Web Services accounts identified by their email addresses. x-amz-grant-read:
+// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// # Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// - By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+// DisplayName is optional and ignored in the request.
+//
+// - By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+// - By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser. Using email addresses
+// to specify a grantee is only supported in the following Amazon Web Services
+// Regions: US East (N. Virginia) US West (N. California) US West (Oregon)
+// Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe
+// (Ireland) South America (São Paulo) For a list of all the Amazon S3 supported
+// Regions and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the Amazon Web Services General Reference.
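+//
+// For example, applying a canned ACL (an illustrative sketch with hypothetical
+// names; the aws, fmt, s3, and session packages are assumed to be imported):
+//
+// svc := s3.New(session.Must(session.NewSession()))
+// _, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
+//     Bucket: aws.String("example-bucket"),
+//     Key:    aws.String("example-key"),
+//     ACL:    aws.String(s3.ObjectCannedACLPublicRead),
+// })
+// if err != nil {
+//     fmt.Println(err) // handle the error
+// }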
+// +// # Versioning +// +// The ACL of an object is set at the object version level. By default, PUT +// sets the ACL of the current version of an object. To set the ACL of a different +// version, use the versionId subresource. +// +// The following operations are related to PutObjectAcl: +// +// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectAcl for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + return out, req.Send() +} + +// PutObjectAclWithContext is the same as PutObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectLegalHold = "PutObjectLegalHold" + +// PutObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectLegalHold for more information on using the PutObjectLegalHold +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutObjectLegalHoldRequest method. 
+// req, resp := client.PutObjectLegalHoldRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *request.Request, output *PutObjectLegalHoldOutput) { + op := &request.Operation{ + Name: opPutObjectLegalHold, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", + } + + if input == nil { + input = &PutObjectLegalHoldInput{} + } + + output = &PutObjectLegalHoldOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectLegalHold API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Applies a legal hold configuration to the specified object. For more information, +// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHold(input *PutObjectLegalHoldInput) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) + return out, req.Send() +} + +// PutObjectLegalHoldWithContext is the same as PutObjectLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectLegalHoldWithContext(ctx aws.Context, input *PutObjectLegalHoldInput, opts ...request.Option) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectLockConfiguration = "PutObjectLockConfiguration" + +// PutObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLockConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectLockConfiguration for more information on using the PutObjectLockConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutObjectLockConfigurationRequest method. 
+// req, resp := client.PutObjectLockConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfigurationInput) (req *request.Request, output *PutObjectLockConfigurationOutput) { + op := &request.Operation{ + Name: opPutObjectLockConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?object-lock", + } + + if input == nil { + input = &PutObjectLockConfigurationInput{} + } + + output = &PutObjectLockConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectLockConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Places an Object Lock configuration on the specified bucket. The rule specified +// in the Object Lock configuration will be applied by default to every new +// object placed in the specified bucket. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// - The DefaultRetention settings require both a mode and a period. +// +// - The DefaultRetention period can be either Days or Years but you must +// select one. You cannot specify Days and Years at the same time. +// +// - You can enable Object Lock for new or existing buckets. For more information, +// see Configuring Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectLockConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfiguration(input *PutObjectLockConfigurationInput) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) + return out, req.Send() +} + +// PutObjectLockConfigurationWithContext is the same as PutObjectLockConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectLockConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectLockConfigurationWithContext(ctx aws.Context, input *PutObjectLockConfigurationInput, opts ...request.Option) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectRetention = "PutObjectRetention" + +// PutObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
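+//
+// As an illustrative sketch of a typical PutObjectRetention call (the bucket,
+// key, and date are hypothetical; the aws, fmt, s3, session, and time packages
+// are assumed to be imported):
+//
+// svc := s3.New(session.Must(session.NewSession()))
+// _, err := svc.PutObjectRetention(&s3.PutObjectRetentionInput{
+//     Bucket: aws.String("example-bucket"),
+//     Key:    aws.String("example-key"),
+//     Retention: &s3.ObjectLockRetention{
+//         Mode:            aws.String(s3.ObjectLockRetentionModeGovernance),
+//         RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
+//     },
+// })
+// if err != nil {
+//     fmt.Println(err) // handle the error
+// }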
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectRetention for more information on using the PutObjectRetention +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutObjectRetentionRequest method. +// req, resp := client.PutObjectRetentionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *request.Request, output *PutObjectRetentionOutput) { + op := &request.Operation{ + Name: opPutObjectRetention, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?retention", + } + + if input == nil { + input = &PutObjectRetentionInput{} + } + + output = &PutObjectRetentionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectRetention API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Places an Object Retention configuration on an object. For more information, +// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// Users or accounts require the s3:PutObjectRetention permission in order to +// place an Object Retention configuration on objects. Bypassing a Governance +// Retention configuration requires the s3:BypassGovernanceRetention permission. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetention(input *PutObjectRetentionInput) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) + return out, req.Send() +} + +// PutObjectRetentionWithContext is the same as PutObjectRetention with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectRetentionWithContext(ctx aws.Context, input *PutObjectRetentionInput, opts ...request.Option) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectTagging = "PutObjectTagging" + +// PutObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectTagging operation. 
The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutObjectTagging for more information on using the PutObjectTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the PutObjectTaggingRequest method.
+// req, resp := client.PutObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
+func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opPutObjectTagging,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &PutObjectTaggingInput{}
+ }
+
+ output = &PutObjectTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Build.PushBackNamed(request.NamedHandler{
+ Name: "contentMd5Handler",
+ Fn: checksum.AddBodyContentMD5Handler,
+ })
+ return
+}
+
+// PutObjectTagging API operation for Amazon Simple Storage Service.
+//
+// This operation is not supported by directory buckets.
+//
+// Sets the supplied tag-set to an object that already exists in a bucket. A
+// tag is a key-value pair. For more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html).
+//
+// You can associate tags with an object by sending a PUT request against the
+// tagging subresource that is associated with the object. You can retrieve
+// tags by sending a GET request. For more information, see GetObjectTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html).
+//
+// For tagging restrictions related to characters and encodings, see
+// Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html).
+// Note that Amazon S3 limits the maximum number of tags to 10 tags per object.
+//
+// To use this operation, you must have permission to perform the s3:PutObjectTagging
+// action. By default, the bucket owner has this permission and can grant this
+// permission to others.
+//
+// To put tags of any other version, use the versionId query parameter. You
+// also need permission for the s3:PutObjectVersionTagging action.
+//
+// PutObjectTagging has the following special errors. For more Amazon S3 errors,
+// see Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html).
+//
+// - InvalidTag - The tag provided was not a valid tag. This error can occur
+// if the tag did not pass input validation. For more information, see Object
+// Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html).
+//
+// - MalformedXML - The XML provided does not match the schema.
+//
+// - OperationAborted - A conflicting conditional action is currently in
+// progress against this resource. Please try again.
+//
+// - InternalError - The service was unable to apply the provided tag to
+// the object.
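+//
+// A minimal sketch of setting an object's tag-set (names are hypothetical;
+// the aws, fmt, s3, and session packages are assumed to be imported):
+//
+// svc := s3.New(session.Must(session.NewSession()))
+// _, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
+//     Bucket: aws.String("example-bucket"),
+//     Key:    aws.String("example-key"),
+//     Tagging: &s3.Tagging{TagSet: []*s3.Tag{
+//         {Key: aws.String("Project"), Value: aws.String("Blue")},
+//     }},
+// })
+// if err != nil {
+//     fmt.Println(err) // handle the error
+// }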
+// +// The following operations are related to PutObjectTagging: +// +// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// +// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + return out, req.Send() +} + +// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutPublicAccessBlock = "PutPublicAccessBlock" + +// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the PutPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutPublicAccessBlockRequest method. +// req, resp := client.PutPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opPutPublicAccessBlock, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &PutPublicAccessBlockInput{} + } + + output = &PutPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. 
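+//
+// A sketch of a typical call that blocks all public access (illustrative only;
+// the bucket name is hypothetical and the aws, fmt, s3, and session packages
+// are assumed to be imported):
+//
+// svc := s3.New(session.Must(session.NewSession()))
+// _, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
+//     Bucket: aws.String("example-bucket"),
+//     PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
+//         BlockPublicAcls:       aws.Bool(true),
+//         BlockPublicPolicy:     aws.Bool(true),
+//         IgnorePublicAcls:      aws.Bool(true),
+//         RestrictPublicBuckets: aws.Bool(true),
+//     },
+// })
+// if err != nil {
+//     fmt.Println(err) // handle the error
+// }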
+// +// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 +// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock configurations are different between the bucket +// and the account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to PutPublicAccessBlock: +// +// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// +// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + return out, req.Send() +} + +// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See PutPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRestoreObject = "RestoreObject" + +// RestoreObjectRequest generates a "aws/request.Request" representing the +// client's request for the RestoreObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+//
+// See RestoreObject for more information on using the RestoreObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//    // Example sending a request using the RestoreObjectRequest method.
+//    req, resp := client.RestoreObjectRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
+func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) {
+    op := &request.Operation{
+        Name:       opRestoreObject,
+        HTTPMethod: "POST",
+        HTTPPath:   "/{Bucket}/{Key+}?restore",
+    }
+
+    if input == nil {
+        input = &RestoreObjectInput{}
+    }
+
+    output = &RestoreObjectOutput{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// RestoreObject API operation for Amazon Simple Storage Service.
+//
+// This operation is not supported by directory buckets.
+//
+// # Restores an archived copy of an object back into Amazon S3
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// This action performs the following types of requests:
+//
+//   - restore an archive - Restore an archived object
+//
+// For more information about the S3 structure in the request body, see the
+// following:
+//
+//   - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+//   - Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html)
+//     in the Amazon S3 User Guide
+//
+//   - Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
+//     in the Amazon S3 User Guide
+//
+// # Permissions
+//
+// To use this operation, you must have permissions to perform the s3:RestoreObject
+// action. The bucket owner has this permission by default and can grant this
+// permission to others. For more information about permissions, see Permissions
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide.
+//
+// # Restoring objects
+//
+// Objects that you archive to the S3 Glacier Flexible Retrieval
+// or S3 Glacier Deep Archive storage classes, and the S3 Intelligent-Tiering Archive
+// or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real
+// time. For objects in the S3 Glacier Flexible Retrieval
+// or S3 Glacier Deep Archive storage classes, you must first initiate a restore
+// request, and then wait until a temporary copy of the object is available.
+// If you want a permanent copy of the object, create a copy of it in the Amazon
+// S3 Standard storage class in your S3 bucket. To access an archived object,
+// you must restore the object for the duration (number of days) that you specify.
+// For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering,
+// you must first initiate a restore request, and then wait until the object
+// is moved into the Frequent Access tier.
+//
+// To restore a specific object version, you can provide a version ID. If you
+// don't provide a version ID, Amazon S3 restores the current version.
+//
+// When restoring an archived object, you can specify one of the following data
+// access tier options in the Tier element of the request body:
+//
+//   - Expedited - Expedited retrievals allow you to quickly access your data
+//     stored in the S3 Glacier Flexible Retrieval storage
+//     class or S3 Intelligent-Tiering Archive tier when occasional urgent requests
+//     for restoring archives are required. For all but the largest archived
+//     objects (250 MB+), data accessed using Expedited retrievals is typically
+//     made available within 1–5 minutes. Provisioned capacity ensures that
+//     retrieval capacity for Expedited retrievals is available when you need
+//     it. Expedited retrievals and provisioned capacity are not available for
+//     objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering
+//     Deep Archive tier.
+//
+//   - Standard - Standard retrievals allow you to access any of your archived
+//     objects within several hours. This is the default option for retrieval
+//     requests that do not specify the retrieval option. Standard retrievals
+//     typically finish within 3–5 hours for objects stored in the S3 Glacier
+//     Flexible Retrieval storage class or S3 Intelligent-Tiering
+//     Archive tier. They typically finish within 12 hours for objects stored
+//     in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering
+//     Deep Archive tier. Standard retrievals are free for objects stored in
+//     S3 Intelligent-Tiering.
+//
+//   - Bulk - Bulk retrievals are free for objects stored in the S3 Glacier Flexible
+//     Retrieval and S3 Intelligent-Tiering storage classes, enabling you to
+//     retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals
+//     typically finish within 5–12 hours for objects stored in the S3 Glacier
+//     Flexible Retrieval storage class or S3 Intelligent-Tiering
+//     Archive tier. Bulk retrievals are also the lowest-cost retrieval option
+//     when restoring objects from S3 Glacier Deep Archive. They typically finish
+//     within 48 hours for objects stored in the S3 Glacier Deep Archive storage
+//     class or S3 Intelligent-Tiering Deep Archive tier.
+//
+// For more information about archive retrieval options and provisioned capacity
+// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
+// in the Amazon S3 User Guide.
+//
+// You can use Amazon S3 restore speed upgrade to change the restore speed to
+// a faster speed while the restore is in progress. For more information, see Upgrading
+// the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html)
+// in the Amazon S3 User Guide.
+//
+// To get the status of object restoration, you can send a HEAD request. Operations
+// return the x-amz-restore header, which provides information about the restoration
+// status, in the response. You can use Amazon S3 event notifications to notify
+// you when a restore is initiated or completed. For more information, see Configuring
+// Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+// in the Amazon S3 User Guide.
+//
+// After restoring an archived object, you can update the restoration period
+// by reissuing the request with a new period. Amazon S3 updates the restoration
+// period relative to the current time and charges only for the request; there
+// are no data transfer charges. You cannot update the restoration period when
+// Amazon S3 is actively processing your current restore request for the object.
+//
+// If your bucket has a lifecycle configuration with a rule that includes an
+// expiration action, the object expiration overrides the life span that you
+// specify in a restore request. For example, if you restore an object copy
+// for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes
+// the object in 3 days. For more information about lifecycle configuration,
+// see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// in the Amazon S3 User Guide.
+//
+// # Responses
+//
+// A successful action returns either the 200 OK or 202 Accepted status code.
+//
+//   - If the object has not previously been restored, Amazon S3 returns 202
+//     Accepted in the response.
+//
+//   - If the object has previously been restored, Amazon S3 returns 200 OK in the
+//     response.
+//
+//   - Special errors: Code: RestoreAlreadyInProgress Cause: Object restore
+//     is already in progress. HTTP Status Code: 409 Conflict SOAP Fault Code
+//     Prefix: Client
+//
+//   - Code: GlacierExpeditedRetrievalNotAvailable Cause: Expedited retrievals
+//     are currently not available. Try again later. (Returned if there is insufficient
+//     capacity to process the Expedited request. This error applies only to
+//     Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP
+//     Status Code: 503 SOAP Fault Code Prefix: N/A
+//
+// The following operations are related to RestoreObject:
+//
+//   - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+//
+//   - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation RestoreObject for usage and error information.
+//
+// Returned Error Codes:
+//   - ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError"
+//     This action is not allowed against this storage tier.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
+func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
+    req, out := c.RestoreObjectRequest(input)
+    return out, req.Send()
+}
+
+// RestoreObjectWithContext is the same as RestoreObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RestoreObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
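+//
+// A minimal sketch, assuming a hypothetical bucket and key (the two-day Bulk
+// restore is likewise an arbitrary choice), that bounds the call with a timeout:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    _, err := client.RestoreObjectWithContext(ctx, &s3.RestoreObjectInput{
+//        Bucket: aws.String("my-archive-bucket"), // hypothetical bucket
+//        Key:    aws.String("backups/2023.tar"),  // hypothetical key
+//        RestoreRequest: &s3.RestoreRequest{
+//            Days: aws.Int64(2),
+//            GlacierJobParameters: &s3.GlacierJobParameters{
+//                Tier: aws.String(s3.TierBulk),
+//            },
+//        },
+//    })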
+func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSelectObjectContent = "SelectObjectContent" + +// SelectObjectContentRequest generates a "aws/request.Request" representing the +// client's request for the SelectObjectContent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SelectObjectContent for more information on using the SelectObjectContent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the SelectObjectContentRequest method. +// req, resp := client.SelectObjectContentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) { + op := &request.Operation{ + Name: opSelectObjectContent, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?select&select-type=2", + } + + if input == nil { + input = &SelectObjectContentInput{} + } + + output = &SelectObjectContentOutput{} + req = c.newRequest(op, input, output) + + es := NewSelectObjectContentEventStream() + req.Handlers.Unmarshal.PushBack(es.setStreamCloser) + output.EventStream = es + + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) + req.Handlers.Unmarshal.PushBack(es.runOutputStream) + req.Handlers.Unmarshal.PushBack(es.runOnStreamPartClose) + return +} + +// SelectObjectContent API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// This action filters the contents of an Amazon S3 object based on a simple +// structured query language (SQL) statement. In the request, along with the +// SQL expression, you must also specify a data serialization format (JSON, +// CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse +// object data into records, and returns only records that match the specified +// SQL expression. You must also specify the data serialization format for the +// response. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// For more information about Amazon S3 Select, see Selecting Content from Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) +// and SELECT Command (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// You must have the s3:GetObject permission for this operation. Amazon S3 Select +// does not support anonymous access. 
For more information about permissions, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. +// +// # Object Data Formats +// +// You can use Amazon S3 Select to query objects that have the following format +// properties: +// +// - CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. +// +// - UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. +// +// - GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. +// GZIP and BZIP2 are the only compression formats that Amazon S3 Select +// supports for CSV and JSON files. Amazon S3 Select supports columnar compression +// for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object +// compression for Parquet objects. +// +// - Server-side encryption - Amazon S3 Select supports querying objects +// that are protected with server-side encryption. For objects that are encrypted +// with customer-provided encryption keys (SSE-C), you must use HTTPS, and +// you must use the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. For objects that are encrypted with Amazon +// S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side +// encryption is handled transparently, so you don't need to specify anything. +// For more information about server-side encryption, including SSE-S3 and +// SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon S3 User Guide. +// +// # Working with the Response Body +// +// Given the response size is unknown, Amazon S3 Select streams the response +// as a series of messages and includes a Transfer-Encoding header with chunked +// as its value in the response. For more information, see Appendix: SelectObjectContent +// Response (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html). +// +// # GetObject Support +// +// The SelectObjectContent action does not support the following GetObject functionality. +// For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). +// +// - Range: Although you can specify a scan range for an Amazon S3 Select +// request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) +// in the request parameters), you cannot specify the range of bytes of an +// object to return. +// +// - The GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes, or +// the ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING +// storage class: You cannot query objects in the GLACIER, DEEP_ARCHIVE, +// or REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS +// or DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage +// class. For more information about storage classes, see Using Amazon S3 +// storage classes (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html) +// in the Amazon S3 User Guide. 
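+//
+// A minimal sketch, assuming a hypothetical bucket, key, and expression, that
+// scans a CSV object and closes the returned event stream when done:
+//
+//    resp, err := client.SelectObjectContent(&s3.SelectObjectContentInput{
+//        Bucket:         aws.String("my-data-bucket"), // hypothetical
+//        Key:            aws.String("records.csv"),    // hypothetical
+//        Expression:     aws.String("SELECT s._1 FROM S3Object s"),
+//        ExpressionType: aws.String(s3.ExpressionTypeSql),
+//        InputSerialization: &s3.InputSerialization{
+//            CSV: &s3.CSVInput{},
+//        },
+//        OutputSerialization: &s3.OutputSerialization{
+//            CSV: &s3.CSVOutput{},
+//        },
+//    })
+//    if err == nil {
+//        defer resp.EventStream.Close()
+//    }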
+// +// # Special Errors +// +// For a list of special errors for this operation, see List of SELECT Object +// Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList) +// +// The following operations are related to SelectObjectContent: +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation SelectObjectContent for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContent(input *SelectObjectContentInput) (*SelectObjectContentOutput, error) { + req, out := c.SelectObjectContentRequest(input) + return out, req.Send() +} + +// SelectObjectContentWithContext is the same as SelectObjectContent with the addition of +// the ability to pass a context and additional request options. +// +// See SelectObjectContent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObjectContentInput, opts ...request.Option) (*SelectObjectContentOutput, error) { + req, out := c.SelectObjectContentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +var _ awserr.Error +var _ time.Time + +// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent. +// +// For testing and mocking the event stream this type should be initialized via +// the NewSelectObjectContentEventStream constructor function. Using the functional options +// to pass in nested mock behavior. +type SelectObjectContentEventStream struct { + + // Reader is the EventStream reader for the SelectObjectContentEventStream + // events. This value is automatically set by the SDK when the API call is made + // Use this member when unit testing your code with the SDK to mock out the + // EventStream Reader. + // + // Must not be nil. + Reader SelectObjectContentEventStreamReader + + outputReader io.ReadCloser + + // StreamCloser is the io.Closer for the EventStream connection. For HTTP + // EventStream this is the response Body. The stream will be closed when + // the Close method of the EventStream is called. + StreamCloser io.Closer + + done chan struct{} + closeOnce sync.Once + err *eventstreamapi.OnceError +} + +// NewSelectObjectContentEventStream initializes an SelectObjectContentEventStream. +// This function should only be used for testing and mocking the SelectObjectContentEventStream +// stream within your application. +// +// The Reader member must be set before reading events from the stream. +// +// The StreamCloser member should be set to the underlying io.Closer, +// (e.g. 
http.Response.Body), that will be closed when the stream Close method +// is called. +// +// es := NewSelectObjectContentEventStream(func(o *SelectObjectContentEventStream){ +// es.Reader = myMockStreamReader +// es.StreamCloser = myMockStreamCloser +// }) +func NewSelectObjectContentEventStream(opts ...func(*SelectObjectContentEventStream)) *SelectObjectContentEventStream { + es := &SelectObjectContentEventStream{ + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), + } + + for _, fn := range opts { + fn(es) + } + + return es +} + +func (es *SelectObjectContentEventStream) setStreamCloser(r *request.Request) { + es.StreamCloser = r.HTTPResponse.Body +} + +func (es *SelectObjectContentEventStream) runOnStreamPartClose(r *request.Request) { + if es.done == nil { + return + } + go es.waitStreamPartClose() + +} + +func (es *SelectObjectContentEventStream) waitStreamPartClose() { + var outputErrCh <-chan struct{} + if v, ok := es.Reader.(interface{ ErrorSet() <-chan struct{} }); ok { + outputErrCh = v.ErrorSet() + } + var outputClosedCh <-chan struct{} + if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok { + outputClosedCh = v.Closed() + } + + select { + case <-es.done: + case <-outputErrCh: + es.err.SetError(es.Reader.Err()) + es.Close() + case <-outputClosedCh: + if err := es.Reader.Err(); err != nil { + es.err.SetError(es.Reader.Err()) + } + es.Close() + } +} + +// Events returns a channel to read events from. +// +// These events are: +// +// - ContinuationEvent +// - EndEvent +// - ProgressEvent +// - RecordsEvent +// - StatsEvent +// - SelectObjectContentEventStreamUnknownEvent +func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return es.Reader.Events() +} + +func (es *SelectObjectContentEventStream) runOutputStream(r *request.Request) { + var opts []func(*eventstream.Decoder) + if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) { + opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger)) + } + + unmarshalerForEvent := unmarshalerForSelectObjectContentEventStreamEvent{ + metadata: protocol.ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + }, + }.UnmarshalerForEventName + + decoder := eventstream.NewDecoder(r.HTTPResponse.Body, opts...) + eventReader := eventstreamapi.NewEventReader(decoder, + protocol.HandlerPayloadUnmarshal{ + Unmarshalers: r.Handlers.UnmarshalStream, + }, + unmarshalerForEvent, + ) + + es.outputReader = r.HTTPResponse.Body + es.Reader = newReadSelectObjectContentEventStream(eventReader) +} + +// Close closes the stream. This will also cause the stream to be closed. +// Close must be called when done using the stream API. Not calling Close +// may result in resource leaks. +// +// You can use the closing of the Reader's Events channel to terminate your +// application's read from the API's stream. +func (es *SelectObjectContentEventStream) Close() (err error) { + es.closeOnce.Do(es.safeClose) + return es.Err() +} + +func (es *SelectObjectContentEventStream) safeClose() { + if es.done != nil { + close(es.done) + } + + es.Reader.Close() + if es.outputReader != nil { + es.outputReader.Close() + } + + es.StreamCloser.Close() +} + +// Err returns any error that occurred while reading or writing EventStream +// Events from the service API's response. Returns nil if there were no errors. 
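+//
+// A typical drain loop (a sketch; the record handling shown is illustrative)
+// reads Events until the channel closes and only then consults Err:
+//
+//    for event := range es.Events() {
+//        if records, ok := event.(*s3.RecordsEvent); ok {
+//            fmt.Print(string(records.Payload))
+//        }
+//    }
+//    if err := es.Err(); err != nil {
+//        log.Fatal(err)
+//    }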
+func (es *SelectObjectContentEventStream) Err() error {
+    if err := es.err.Err(); err != nil {
+        return err
+    }
+    if err := es.Reader.Err(); err != nil {
+        return err
+    }
+
+    return nil
+}
+
+const opUploadPart = "UploadPart"
+
+// UploadPartRequest generates a "aws/request.Request" representing the
+// client's request for the UploadPart operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UploadPart for more information on using the UploadPart
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//    // Example sending a request using the UploadPartRequest method.
+//    req, resp := client.UploadPartRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
+func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) {
+    op := &request.Operation{
+        Name:       opUploadPart,
+        HTTPMethod: "PUT",
+        HTTPPath:   "/{Bucket}/{Key+}",
+    }
+
+    if input == nil {
+        input = &UploadPartInput{}
+    }
+
+    output = &UploadPartOutput{}
+    req = c.newRequest(op, input, output)
+    return
+}
+
+// UploadPart API operation for Amazon Simple Storage Service.
+//
+// Uploads a part in a multipart upload.
+//
+// In this operation, you provide new data as a part of an object in your request.
+// However, you have an option to specify your existing Amazon S3 object as
+// a data source for the part you are uploading. To upload a part from an existing
+// object, you use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
+// operation.
+//
+// You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html))
+// before you can upload any part. In response to your initiate request, Amazon
+// S3 returns an upload ID, a unique identifier that you must include in your
+// upload part request.
+//
+// Part numbers can be any number from 1 to 10,000, inclusive. A part number
+// uniquely identifies a part and also defines its position within the object
+// being created. If you upload a new part using the same part number that was
+// used with a previous part, the previously uploaded part is overwritten.
+//
+// For information about maximum and minimum part sizes and other multipart
+// upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html)
+// in the Amazon S3 User Guide.
+//
+// After you initiate a multipart upload and upload one or more parts, you must
+// either complete or abort the multipart upload in order to stop getting charged
+// for storage of the uploaded parts. Only after you either complete or abort
+// the multipart upload does Amazon S3 free up the parts storage and stop charging
+// you for the parts storage.
+//
+// For more information on multipart uploads, go to Multipart Upload Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the
+// Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name.
+// Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// Permissions
+//
+//   - General purpose bucket permissions - For information on the permissions
+//     required to use the multipart upload API, see Multipart Upload and Permissions
+//     (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+//     in the Amazon S3 User Guide.
+//
+//   - Directory bucket permissions - To grant access to this API operation
+//     on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+//     API operation for session-based authorization. Specifically, you grant
+//     the s3express:CreateSession permission to the directory bucket in a bucket
+//     policy or an IAM identity-based policy. Then, you make the CreateSession
+//     API call on the bucket to obtain a session token. With the session token
+//     in your request header, you can make API requests to this operation. After
+//     the session token expires, you make another CreateSession API call to
+//     generate a new session token for use. The Amazon Web Services CLI and SDKs
+//     create and refresh the session token automatically to avoid service
+//     interruptions when a session expires. For more information about authorization,
+//     see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// # Data integrity
+//
+// General purpose bucket - To ensure that data is not corrupted traversing
+// the network, specify the Content-MD5 header in the upload part request. Amazon
+// S3 checks the part data against the provided MD5 value. If they do not match,
+// Amazon S3 returns an error. If the upload request is signed with Signature
+// Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 header
+// as a checksum instead of Content-MD5. For more information, see Authenticating
+// Requests: Using the Authorization Header (Amazon Web Services Signature Version
+// 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html).
+//
+// Directory buckets - MD5 is not supported by directory buckets. You can use
+// checksum algorithms to check object integrity.
+//
+// Encryption
+//
+//   - General purpose bucket - Server-side encryption is for data encryption
+//     at rest. Amazon S3 encrypts your data as it writes it to disks in its
+//     data centers and decrypts it when you access it. You have mutually exclusive
+//     options to protect data using server-side encryption in Amazon S3, depending
+//     on how you choose to manage the encryption keys. Specifically, the encryption
+//     key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS
+//     keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts
+//     data with server-side encryption using Amazon S3 managed keys (SSE-S3)
+//     by default. You can optionally tell Amazon S3 to encrypt data at rest
+//     using server-side encryption with other key options. The option you use
+//     depends on whether you want to use KMS keys (SSE-KMS) or provide your
+//     own encryption key (SSE-C).
Server-side encryption is supported by the +// S3 Multipart Upload operations. Unless you are using a customer-provided +// encryption key (SSE-C), you don't need to specify the encryption parameters +// in each UploadPart request. Instead, you only need to specify the server-side +// encryption parameters in the initial Initiate Multipart request. For more +// information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// If you request server-side encryption using a customer-provided encryption +// key (SSE-C) in your initiate multipart upload request, you must provide +// identical encryption information in each part upload using the following +// request headers. x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key +// x-amz-server-side-encryption-customer-key-MD5 +// +// - Directory bucket - For directory buckets, only server-side encryption +// with Amazon S3 managed keys (SSE-S3) (AES256) is supported. +// +// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) +// in the Amazon S3 User Guide. +// +// Special errors +// +// - Error Code: NoSuchUpload Description: The specified multipart upload +// does not exist. The upload ID might be invalid, or the multipart upload +// might have been aborted or completed. HTTP Status Code: 404 Not Found +// SOAP Fault Code Prefix: Client +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following operations are related to UploadPart: +// +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPart for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + return out, req.Send() +} + +// UploadPartWithContext is the same as UploadPart with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPart for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
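+    // out is populated by reference; it is not valid until req.Send()
+    // below returns without error.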
+ return out, req.Send() +} + +const opUploadPartCopy = "UploadPartCopy" + +// UploadPartCopyRequest generates a "aws/request.Request" representing the +// client's request for the UploadPartCopy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadPartCopy for more information on using the UploadPartCopy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UploadPartCopyRequest method. +// req, resp := client.UploadPartCopyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { + op := &request.Operation{ + Name: opUploadPartCopy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartCopyInput{} + } + + output = &UploadPartCopyOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPartCopy API operation for Amazon Simple Storage Service. +// +// Uploads a part by copying data from an existing object as data source. To +// specify the data source, you add the request header x-amz-copy-source in +// your request. To specify a byte range, you add the request header x-amz-copy-source-range +// in your request. +// +// For information about maximum and minimum part sizes and other multipart +// upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) +// in the Amazon S3 User Guide. +// +// Instead of copying data from an existing object as part data, you might use +// the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// action to upload new data as a part of an object in your request. +// +// You must initiate a multipart upload before you can upload any part. In response +// to your initiate request, Amazon S3 returns the upload ID, a unique identifier +// that you must include in your upload part request. +// +// For conceptual information about multipart uploads, see Uploading Objects +// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. For information about copying objects using +// a single atomic action vs. a multipart upload, see Operations on Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) in +// the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. 
+// +// # Authentication and authorization +// +// All UploadPartCopy requests must be authenticated and signed by using IAM +// credentials (access key ID and secret access key for the IAM identities). +// All headers with the x-amz- prefix, including x-amz-copy-source, must be +// signed. For more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). +// +// Directory buckets - You must use IAM credentials to authenticate and authorize +// your access to the UploadPartCopy API operation, instead of using the temporary +// security credentials through the CreateSession API operation. +// +// Amazon Web Services CLI or SDKs handles authentication and authorization +// on your behalf. +// +// # Permissions +// +// You must have READ access to the source object and WRITE access to the destination +// bucket. +// +// - General purpose bucket permissions - You must have the permissions in +// a policy based on the bucket types of your source bucket and destination +// bucket in an UploadPartCopy operation. If the source object is in a general +// purpose bucket, you must have the s3:GetObject permission to read the +// source object that is being copied. If the destination bucket is a general +// purpose bucket, you must have the s3:PutObject permission to write the +// object copy to the destination bucket. For information about permissions +// required to use the multipart upload API, see Multipart upload API and +// permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - You must have permissions in a bucket +// policy or an IAM identity-based policy based on the source and destination +// bucket types in an UploadPartCopy operation. If the source object that +// you want to copy is in a directory bucket, you must have the s3express:CreateSession +// permission in the Action element of a policy to read the object. By default, +// the session is in the ReadWrite mode. If you want to restrict the access, +// you can explicitly set the s3express:SessionMode condition key to ReadOnly +// on the copy source bucket. If the copy destination is a directory bucket, +// you must have the s3express:CreateSession permission in the Action element +// of a policy to write the object to the destination. The s3express:SessionMode +// condition key cannot be set to ReadOnly on the copy destination. For example +// policies, see Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. +// +// Encryption +// +// - General purpose buckets - For information about using server-side encryption +// with customer-provided encryption keys with the UploadPartCopy operation, +// see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html). +// +// - Directory buckets - For directory buckets, only server-side encryption +// with Amazon S3 managed keys (SSE-S3) (AES256) is supported. 
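+//
+// A minimal sketch, assuming hypothetical bucket, key, and upload ID values,
+// that copies the first 5 MiB of an existing object as part number 1:
+//
+//    part, err := client.UploadPartCopy(&s3.UploadPartCopyInput{
+//        Bucket:          aws.String("dest-bucket"),        // hypothetical
+//        Key:             aws.String("large-object"),       // hypothetical
+//        UploadId:        aws.String("example-upload-id"),  // hypothetical
+//        PartNumber:      aws.Int64(1),
+//        CopySource:      aws.String("src-bucket/src-key"), // hypothetical
+//        CopySourceRange: aws.String("bytes=0-5242879"),    // first 5 MiB
+//    })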
+// +// Special errors +// +// - Error Code: NoSuchUpload Description: The specified multipart upload +// does not exist. The upload ID might be invalid, or the multipart upload +// might have been aborted or completed. HTTP Status Code: 404 Not Found +// +// - Error Code: InvalidRequest Description: The specified copy source is +// not supported as a byte-range copy source. HTTP Status Code: 400 Bad Request +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following operations are related to UploadPartCopy: +// +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPartCopy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + return out, req.Send() +} + +// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPartCopy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opWriteGetObjectResponse = "WriteGetObjectResponse" + +// WriteGetObjectResponseRequest generates a "aws/request.Request" representing the +// client's request for the WriteGetObjectResponse operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See WriteGetObjectResponse for more information on using the WriteGetObjectResponse +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the WriteGetObjectResponseRequest method. 
+//    req, resp := client.WriteGetObjectResponseRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse
+func (c *S3) WriteGetObjectResponseRequest(input *WriteGetObjectResponseInput) (req *request.Request, output *WriteGetObjectResponseOutput) {
+    op := &request.Operation{
+        Name:       opWriteGetObjectResponse,
+        HTTPMethod: "POST",
+        HTTPPath:   "/WriteGetObjectResponse",
+    }
+
+    if input == nil {
+        input = &WriteGetObjectResponseInput{}
+    }
+
+    output = &WriteGetObjectResponseOutput{}
+    req = c.newRequest(op, input, output)
+    req.Handlers.Sign.Remove(v4.SignRequestHandler)
+    handler := v4.BuildNamedHandler("v4.CustomSignerHandler", v4.WithUnsignedPayload)
+    req.Handlers.Sign.PushFrontNamed(handler)
+    req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+    req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{RequestRoute}.", input.hostLabels))
+    req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler)
+    return
+}
+
+// WriteGetObjectResponse API operation for Amazon Simple Storage Service.
+//
+// This operation is not supported by directory buckets.
+//
+// Passes transformed objects to a GetObject operation when using Object Lambda
+// access points. For information about Object Lambda access points, see Transforming
+// objects with Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html)
+// in the Amazon S3 User Guide.
+//
+// This operation supports metadata that can be returned by GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html),
+// in addition to RequestRoute, RequestToken, StatusCode, ErrorCode, and ErrorMessage.
+// The GetObject response metadata is supported so that the WriteGetObjectResponse
+// caller, typically a Lambda function, can provide the same metadata when
+// it internally invokes GetObject. When WriteGetObjectResponse is called by
+// a customer-owned Lambda function, the metadata returned to the end user GetObject
+// call might differ from what Amazon S3 would normally return.
+//
+// You can include any number of metadata headers. When including a metadata
+// header, it should be prefaced with x-amz-meta. For example, x-amz-meta-my-custom-header:
+// MyCustomValue. The primary use case for this is to forward GetObject metadata.
+//
+// Amazon Web Services provides some prebuilt Lambda functions that you can
+// use with S3 Object Lambda to detect and redact personally identifiable information
+// (PII) and decompress S3 objects. These Lambda functions are available in
+// the Amazon Web Services Serverless Application Repository, and can be selected
+// through the Amazon Web Services Management Console when you create your Object
+// Lambda access point.
+//
+// Example 1: PII Access Control - This Lambda function uses Amazon Comprehend,
+// a natural language processing (NLP) service using machine learning to find
+// insights and relationships in text. It automatically detects personally identifiable
+// information (PII) such as names, addresses, dates, credit card numbers, and
+// social security numbers from documents in your Amazon S3 bucket.
+//
+// Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a
+// natural language processing (NLP) service using machine learning to find
+// insights and relationships in text. It automatically redacts personally identifiable
+// information (PII) such as names, addresses, dates, credit card numbers, and
+// social security numbers from documents in your Amazon S3 bucket.
+//
+// Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression
+// is equipped to decompress objects stored in S3 in one of six compressed file
+// formats including bzip2, gzip, snappy, zlib, zstandard, and ZIP.
+//
+// For information on how to view and use these functions, see Using Amazon
+// Web Services built Lambda functions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html)
+// in the Amazon S3 User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation WriteGetObjectResponse for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse
+func (c *S3) WriteGetObjectResponse(input *WriteGetObjectResponseInput) (*WriteGetObjectResponseOutput, error) {
+    req, out := c.WriteGetObjectResponseRequest(input)
+    return out, req.Send()
+}
+
+// WriteGetObjectResponseWithContext is the same as WriteGetObjectResponse with the addition of
+// the ability to pass a context and additional request options.
+//
+// See WriteGetObjectResponse for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WriteGetObjectResponseWithContext(ctx aws.Context, input *WriteGetObjectResponseInput, opts ...request.Option) (*WriteGetObjectResponseOutput, error) {
+    req, out := c.WriteGetObjectResponseRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+// Specifies the days since the initiation of an incomplete multipart upload
+// that Amazon S3 will wait before permanently removing all parts of the upload.
+// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+// Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+// in the Amazon S3 User Guide.
+type AbortIncompleteMultipartUpload struct {
+    _ struct{} `type:"structure"`
+
+    // Specifies the number of days after which Amazon S3 aborts an incomplete multipart
+    // upload.
+    DaysAfterInitiation *int64 `type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AbortIncompleteMultipartUpload) String() string {
+    return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AbortIncompleteMultipartUpload) GoString() string {
+    return s.String()
+}
+
+// SetDaysAfterInitiation sets the DaysAfterInitiation field's value.
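+//
+// For example (a sketch; the seven-day window is an arbitrary choice), the
+// setter can be chained when building a lifecycle rule:
+//
+//    abort := (&s3.AbortIncompleteMultipartUpload{}).SetDaysAfterInitiation(7)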
+func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { + s.DaysAfterInitiation = &v + return s +} + +type AbortMultipartUploadInput struct { + _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"` + + // The bucket name to which the upload was taking place. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Key of the object for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. 
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Upload ID that identifies the multipart upload. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AbortMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *AbortMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *AbortMultipartUploadInput) SetExpectedBucketOwner(v string) *AbortMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput { + s.UploadId = &v + return s +} + +func (s *AbortMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *AbortMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s AbortMultipartUploadInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// Configures the transfer acceleration state for an Amazon S3 bucket. For more +// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon S3 User Guide. +type AccelerateConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the transfer acceleration status of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccelerateConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccelerateConfiguration) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { + s.Status = &v + return s +} + +// Contains the elements that set the ACL permissions for an object per grantee. +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. + Owner *Owner `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
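+//
+// Illustrative sketch (not part of the generated documentation; the owner and
+// grantee values are placeholder assumptions): a policy is typically assembled
+// with the generated setters, and String gives a redacted dump for logging:
+//
+//	policy := (&AccessControlPolicy{}).
+//		SetOwner((&Owner{}).SetID("owner-canonical-id")).
+//		SetGrants([]*Grant{
+//			(&Grant{}).
+//				SetGrantee((&Grantee{}).SetType("CanonicalUser").SetID("grantee-canonical-id")).
+//				SetPermission("READ"),
+//		})
+//	fmt.Println(policy.String())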
+func (s AccessControlPolicy) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AccessControlPolicy) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AccessControlPolicy) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"}
+	if s.Grants != nil {
+		for i, v := range s.Grants {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetGrants sets the Grants field's value.
+func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy {
+	s.Grants = v
+	return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy {
+	s.Owner = v
+	return s
+}
+
+// A container for information about access control for replicas.
+type AccessControlTranslation struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the replica ownership. For default and valid values, see PUT bucket
+	// replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html)
+	// in the Amazon S3 API Reference.
+	//
+	// Owner is a required field
+	Owner *string `type:"string" required:"true" enum:"OwnerOverride"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AccessControlTranslation) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AccessControlTranslation) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AccessControlTranslation) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AccessControlTranslation"}
+	if s.Owner == nil {
+		invalidParams.Add(request.NewErrParamRequired("Owner"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetOwner sets the Owner field's value.
+func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation {
+	s.Owner = &v
+	return s
+}
+
+// A conjunction (logical AND) of predicates, which is used in evaluating an
+// analytics filter. The operator must have at least two predicates in any
+// combination, and an object must match all of the predicates for the filter
+// to apply.
+type AnalyticsAndOperator struct {
+	_ struct{} `type:"structure"`
+
+	// The prefix to use when evaluating an AND predicate: The prefix that an object
+	// must have to be included in the analytics results.
+	Prefix *string `type:"string"`
+
+	// The list of tags to use when evaluating an AND predicate.
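+	//
+	// Illustrative sketch (not generated; the key and value are assumptions):
+	// an AND predicate that matches objects under a prefix that also carry a
+	// given tag:
+	//
+	//	op := (&AnalyticsAndOperator{}).
+	//		SetPrefix("documents/").
+	//		SetTags([]*Tag{(&Tag{}).SetKey("team").SetValue("analytics")})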
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { + s.Tags = v + return s +} + +// Specifies the configuration and any analyses for the analytics filter of +// an Amazon S3 bucket. +type AnalyticsConfiguration struct { + _ struct{} `type:"structure"` + + // The filter used to describe a set of objects for analyses. A filter must + // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). + // If no filter is provided, all objects will be considered in any analysis. + Filter *AnalyticsFilter `type:"structure"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Contains data related to access patterns to be collected and made available + // to analyze the tradeoffs between different storage classes. + // + // StorageClassAnalysis is a required field + StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
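+//
+// For illustration (the identifier is an assumption), Validate catches missing
+// required members before a request is built; with only Id set, the missing
+// StorageClassAnalysis is reported:
+//
+//	cfg := (&AnalyticsConfiguration{}).SetId("report-1")
+//	if err := cfg.Validate(); err != nil {
+//		// err lists StorageClassAnalysis as a required field
+//	}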
+func (s *AnalyticsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.StorageClassAnalysis == nil { + invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.StorageClassAnalysis != nil { + if err := s.StorageClassAnalysis.Validate(); err != nil { + invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration { + s.Id = &v + return s +} + +// SetStorageClassAnalysis sets the StorageClassAnalysis field's value. +func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration { + s.StorageClassAnalysis = v + return s +} + +// Where to publish the analytics results. +type AnalyticsExportDestination struct { + _ struct{} `type:"structure"` + + // A destination signifying output to an S3 bucket. + // + // S3BucketDestination is a required field + S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsExportDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsExportDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsExportDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination { + s.S3BucketDestination = v + return s +} + +// The filter used to describe a set of objects for analyses. A filter must +// have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). +// If no filter is provided, all objects will be considered in any analysis. +type AnalyticsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating an + // analytics filter. The operator must have at least two predicates. 
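+	//
+	// Illustrative sketch (the prefix value is an assumption): a filter carries
+	// exactly one of Prefix, Tag, or And, for example a prefix alone:
+	//
+	//	filter := (&AnalyticsFilter{}).SetPrefix("logs/")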
+ And *AnalyticsAndOperator `type:"structure"` + + // The prefix to use when evaluating an analytics filter. + Prefix *string `type:"string"` + + // The tag to use when evaluating an analytics filter. + Tag *Tag `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { + s.Tag = v + return s +} + +// Contains information about where to publish the analytics results. +type AnalyticsS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the bucket to which data is exported. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // The account ID that owns the destination S3 bucket. If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. + BucketAccountId *string `type:"string"` + + // Specifies the file format used when exporting data to Amazon S3. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` + + // The prefix to use when exporting data. The prefix is prepended to all results. + Prefix *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
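+//
+// For illustration (the ARN and prefix are placeholder assumptions), a
+// destination is usually populated with the chained setters:
+//
+//	dst := (&AnalyticsS3BucketDestination{}).
+//		SetBucket("arn:aws:s3:::example-results-bucket").
+//		SetFormat("CSV").
+//		SetPrefix("analytics/")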
+func (s AnalyticsS3BucketDestination) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsS3BucketDestination) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Format == nil {
+		invalidParams.Add(request.NewErrParamRequired("Format"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination {
+	s.Bucket = &v
+	return s
+}
+
+func (s *AnalyticsS3BucketDestination) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetBucketAccountId sets the BucketAccountId field's value.
+func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination {
+	s.BucketAccountId = &v
+	return s
+}
+
+// SetFormat sets the Format field's value.
+func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination {
+	s.Format = &v
+	return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination {
+	s.Prefix = &v
+	return s
+}
+
+// In terms of implementation, a Bucket is a resource.
+type Bucket struct {
+	_ struct{} `type:"structure"`
+
+	// Date the bucket was created. This date can change when making changes to
+	// your bucket, such as editing its bucket policy.
+	CreationDate *time.Time `type:"timestamp"`
+
+	// The name of the bucket.
+	Name *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Bucket) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Bucket) GoString() string {
+	return s.String()
+}
+
+// SetCreationDate sets the CreationDate field's value.
+func (s *Bucket) SetCreationDate(v time.Time) *Bucket {
+	s.CreationDate = &v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *Bucket) SetName(v string) *Bucket {
+	s.Name = &v
+	return s
+}
+
+// Specifies the information about the bucket that will be created. For more
+// information about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
+// in the Amazon S3 User Guide.
+//
+// This functionality is only supported by directory buckets.
+type BucketInfo struct {
+	_ struct{} `type:"structure"`
+
+	// The number of Availability Zones that are used for redundancy for the bucket.
+	DataRedundancy *string `type:"string" enum:"DataRedundancy"`
+
+	// The type of bucket.
+	Type *string `type:"string" enum:"BucketType"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
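+//
+// Illustrative sketch (not generated; the enum values shown are assumptions
+// about the DataRedundancy and BucketType enums): directory-bucket creation
+// parameters are expressed through this type, for example:
+//
+//	info := (&BucketInfo{}).
+//		SetDataRedundancy("SingleAvailabilityZone").
+//		SetType("Directory")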
+func (s BucketInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BucketInfo) GoString() string { + return s.String() +} + +// SetDataRedundancy sets the DataRedundancy field's value. +func (s *BucketInfo) SetDataRedundancy(v string) *BucketInfo { + s.DataRedundancy = &v + return s +} + +// SetType sets the Type field's value. +func (s *BucketInfo) SetType(v string) *BucketInfo { + s.Type = &v + return s +} + +// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon S3 User Guide. +type BucketLifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // A lifecycle rule for individual objects in an Amazon S3 bucket. + // + // Rules is a required field + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BucketLifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BucketLifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration { + s.Rules = v + return s +} + +// Container for logging status information. +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon S3 API Reference. + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLoggingStatus) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} + if s.LoggingEnabled != nil { + if err := s.LoggingEnabled.Validate(); err != nil { + invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus { + s.LoggingEnabled = v + return s +} + +// Describes the cross-origin access configuration for objects in an Amazon +// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// S3 User Guide. +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. + // + // CORSRules is a required field + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CORSConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} + if s.CORSRules == nil { + invalidParams.Add(request.NewErrParamRequired("CORSRules")) + } + if s.CORSRules != nil { + for i, v := range s.CORSRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCORSRules sets the CORSRules field's value. +func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { + s.CORSRules = v + return s +} + +// Specifies a cross-origin access rule for an Amazon S3 bucket. +type CORSRule struct { + _ struct{} `type:"structure"` + + // Headers that are specified in the Access-Control-Request-Headers header. + // These headers are allowed in a preflight OPTIONS request. In response to + // any preflight OPTIONS request, Amazon S3 returns any requested headers that + // are allowed. + AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // An HTTP method that you allow the origin to execute. Valid values are GET, + // PUT, HEAD, POST, and DELETE. 
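+	//
+	// For illustration (the origin is an assumption), a rule allowing one
+	// origin to GET and PUT objects might look like:
+	//
+	//	rule := (&CORSRule{}).
+	//		SetAllowedMethods([]*string{aws.String("GET"), aws.String("PUT")}).
+	//		SetAllowedOrigins([]*string{aws.String("https://example.com")})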
+ // + // AllowedMethods is a required field + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. + // + // AllowedOrigins is a required field + AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). + ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. + MaxAgeSeconds *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CORSRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CORSRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSRule"} + if s.AllowedMethods == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) + } + if s.AllowedOrigins == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowedHeaders sets the AllowedHeaders field's value. +func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule { + s.AllowedHeaders = v + return s +} + +// SetAllowedMethods sets the AllowedMethods field's value. +func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule { + s.AllowedMethods = v + return s +} + +// SetAllowedOrigins sets the AllowedOrigins field's value. +func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule { + s.AllowedOrigins = v + return s +} + +// SetExposeHeaders sets the ExposeHeaders field's value. +func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule { + s.ExposeHeaders = v + return s +} + +// SetID sets the ID field's value. +func (s *CORSRule) SetID(v string) *CORSRule { + s.ID = &v + return s +} + +// SetMaxAgeSeconds sets the MaxAgeSeconds field's value. +func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { + s.MaxAgeSeconds = &v + return s +} + +// Describes how an uncompressed comma-separated values (CSV)-formatted input +// object is formatted. +type CSVInput struct { + _ struct{} `type:"structure"` + + // Specifies that CSV field values may contain quoted record delimiters and + // such records should be allowed. Default value is FALSE. Setting this value + // to TRUE may lower performance. + AllowQuotedRecordDelimiter *bool `type:"boolean"` + + // A single character used to indicate that a row should be ignored when the + // character is present at the start of that row. You can specify any character + // to indicate a comment line. 
The default character is #.
+	//
+	// Default: #
+	Comments *string `type:"string"`
+
+	// A single character used to separate individual fields in a record. You can
+	// specify an arbitrary delimiter.
+	FieldDelimiter *string `type:"string"`
+
+	// Describes the first line of input. Valid values are:
+	//
+	//    * NONE: First line is not a header.
+	//
+	//    * IGNORE: First line is a header, but you can't use the header values
+	//    to indicate the column in an expression. You can use column position (such
+	//    as _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s).
+	//
+	//    * USE: First line is a header, and you can use the header value to identify
+	//    a column in an expression (SELECT "name" FROM OBJECT).
+	FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"`
+
+	// A single character used for escaping when the field delimiter is part of
+	// the value. For example, if the value is a, b, Amazon S3 wraps this field
+	// value in quotation marks, as follows: " a , b ".
+	//
+	// Type: String
+	//
+	// Default: "
+	//
+	// Ancestors: CSV
+	QuoteCharacter *string `type:"string"`
+
+	// A single character used for escaping the quotation mark character inside
+	// an already escaped value. For example, the value """ a , b """ is parsed
+	// as " a , b ".
+	QuoteEscapeCharacter *string `type:"string"`
+
+	// A single character used to separate individual records in the input. Instead
+	// of the default value, you can specify an arbitrary delimiter.
+	RecordDelimiter *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CSVInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CSVInput) GoString() string {
+	return s.String()
+}
+
+// SetAllowQuotedRecordDelimiter sets the AllowQuotedRecordDelimiter field's value.
+func (s *CSVInput) SetAllowQuotedRecordDelimiter(v bool) *CSVInput {
+	s.AllowQuotedRecordDelimiter = &v
+	return s
+}
+
+// SetComments sets the Comments field's value.
+func (s *CSVInput) SetComments(v string) *CSVInput {
+	s.Comments = &v
+	return s
+}
+
+// SetFieldDelimiter sets the FieldDelimiter field's value.
+func (s *CSVInput) SetFieldDelimiter(v string) *CSVInput {
+	s.FieldDelimiter = &v
+	return s
+}
+
+// SetFileHeaderInfo sets the FileHeaderInfo field's value.
+func (s *CSVInput) SetFileHeaderInfo(v string) *CSVInput {
+	s.FileHeaderInfo = &v
+	return s
+}
+
+// SetQuoteCharacter sets the QuoteCharacter field's value.
+func (s *CSVInput) SetQuoteCharacter(v string) *CSVInput {
+	s.QuoteCharacter = &v
+	return s
+}
+
+// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value.
+func (s *CSVInput) SetQuoteEscapeCharacter(v string) *CSVInput {
+	s.QuoteEscapeCharacter = &v
+	return s
+}
+
+// SetRecordDelimiter sets the RecordDelimiter field's value.
+func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput {
+	s.RecordDelimiter = &v
+	return s
+}
+
+// Describes how uncompressed comma-separated values (CSV)-formatted results
+// are formatted.
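+//
+// A minimal sketch (not generated; the delimiter choices are assumptions) of
+// an output description that quotes fields only when needed:
+//
+//	out := (&CSVOutput{}).
+//		SetQuoteFields("ASNEEDED").
+//		SetFieldDelimiter(",").
+//		SetRecordDelimiter("\n")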
+type CSVOutput struct { + _ struct{} `type:"structure"` + + // The value used to separate individual fields in a record. You can specify + // an arbitrary delimiter. + FieldDelimiter *string `type:"string"` + + // A single character used for escaping when the field delimiter is part of + // the value. For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". + QuoteCharacter *string `type:"string"` + + // The single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string `type:"string"` + + // Indicates whether to use quotation marks around output fields. + // + // * ALWAYS: Always use quotation marks for output fields. + // + // * ASNEEDED: Use quotation marks for output fields when needed. + QuoteFields *string `type:"string" enum:"QuoteFields"` + + // A single character used to separate individual records in the output. Instead + // of the default value, you can specify an arbitrary delimiter. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CSVOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CSVOutput) GoString() string { + return s.String() +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVOutput) SetFieldDelimiter(v string) *CSVOutput { + s.FieldDelimiter = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. +func (s *CSVOutput) SetQuoteCharacter(v string) *CSVOutput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVOutput) SetQuoteEscapeCharacter(v string) *CSVOutput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetQuoteFields sets the QuoteFields field's value. +func (s *CSVOutput) SetQuoteFields(v string) *CSVOutput { + s.QuoteFields = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { + s.RecordDelimiter = &v + return s +} + +// Contains all the possible checksum or digest values for an object. +type Checksum struct { + _ struct{} `type:"structure"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. 
When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Checksum) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Checksum) GoString() string { + return s.String() +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *Checksum) SetChecksumCRC32(v string) *Checksum { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *Checksum) SetChecksumCRC32C(v string) *Checksum { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *Checksum) SetChecksumSHA1(v string) *Checksum { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *Checksum) SetChecksumSHA256(v string) *Checksum { + s.ChecksumSHA256 = &v + return s +} + +// Container for specifying the Lambda notification configuration. +type CloudFunctionConfiguration struct { + _ struct{} `type:"structure"` + + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. + CloudFunction *string `type:"string"` + + // The bucket event for which to send notifications. 
+ // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + // Bucket events for which to send notifications. + Events []*string `locationName:"Event" type:"list" flattened:"true" enum:"Event"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The role supporting the invocation of the Lambda function + InvocationRole *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CloudFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CloudFunctionConfiguration) GoString() string { + return s.String() +} + +// SetCloudFunction sets the CloudFunction field's value. +func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration { + s.CloudFunction = &v + return s +} + +// SetEvent sets the Event field's value. +func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration { + s.Id = &v + return s +} + +// SetInvocationRole sets the InvocationRole field's value. +func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration { + s.InvocationRole = &v + return s +} + +// Container for all (if there are any) keys between Prefix and the next occurrence +// of the string specified by a delimiter. CommonPrefixes lists keys that act +// like subdirectories in the directory specified by Prefix. For example, if +// the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, +// the common prefix is notes/summer/. +type CommonPrefix struct { + _ struct{} `type:"structure"` + + // Container for the specified common prefix. + Prefix *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CommonPrefix) GoString() string { + return s.String() +} + +// SetPrefix sets the Prefix field's value. 
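+//
+// For illustration (the list output variable is an assumption), common
+// prefixes surface in list results and read like subdirectory names:
+//
+//	for _, cp := range listOutput.CommonPrefixes {
+//		fmt.Println(aws.StringValue(cp.Prefix)) // e.g. "notes/summer/"
+//	}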
+func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { + s.Prefix = &v + return s +} + +type CompleteMultipartUploadInput struct { + _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"` + + // Name of the bucket to which the multipart upload was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32C checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. 
+ ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 160-bit SHA-1 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 256-bit SHA-256 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The container for the multipart upload request information. + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is required only when the object was created using a checksum algorithm + // or if your bucket policy requires the use of SSE-C. For more information, + // see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // The server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. 
+ // + // This functionality is not supported for directory buckets. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CompleteMultipartUploadInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // The MD5 server-side encryption (SSE) customer managed key. This parameter + // is needed only when the object was created using a checksum algorithm. For + // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // ID for the initiated multipart upload. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *CompleteMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *CompleteMultipartUploadInput) SetChecksumCRC32(v string) *CompleteMultipartUploadInput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *CompleteMultipartUploadInput) SetChecksumCRC32C(v string) *CompleteMultipartUploadInput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *CompleteMultipartUploadInput) SetChecksumSHA1(v string) *CompleteMultipartUploadInput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. 
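+//
+// Setters return the receiver, so a request input can be built by chaining;
+// an illustrative sketch (the identifiers and the completedParts value are
+// assumptions):
+//
+//	input := (&CompleteMultipartUploadInput{}).
+//		SetBucket("example-bucket").
+//		SetKey("example-key").
+//		SetUploadId("example-upload-id").
+//		SetMultipartUpload(completedParts)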
+func (s *CompleteMultipartUploadInput) SetChecksumSHA256(v string) *CompleteMultipartUploadInput { + s.ChecksumSHA256 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *CompleteMultipartUploadInput) SetExpectedBucketOwner(v string) *CompleteMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { + s.Key = &v + return s +} + +// SetMultipartUpload sets the MultipartUpload field's value. +func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput { + s.MultipartUpload = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CompleteMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CompleteMultipartUploadInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CompleteMultipartUploadInput) SetSSECustomerKey(v string) *CompleteMultipartUploadInput { + s.SSECustomerKey = &v + return s +} + +func (s *CompleteMultipartUploadInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CompleteMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CompleteMultipartUploadInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput { + s.UploadId = &v + return s +} + +func (s *CompleteMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CompleteMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CompleteMultipartUploadInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type CompleteMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // The name of the bucket that contains the newly created object. Does not return + // the access point ARN or access point alias if used. + // + // Access points are not supported by directory buckets. + Bucket *string `type:"string"` + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. 
When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + + // Entity tag that identifies the newly created object's data. Objects with + // different object data will have different entity tags. The entity tag is + // an opaque string. The entity tag may or may not be an MD5 digest of the object + // data. If the entity tag is not an MD5 digest of the object data, it will + // contain one or more nonhexadecimal characters and/or will consist of less + // than 32 or more than 32 hexadecimal digits. For more information about how + // the entity tag is calculated, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded. + // + // This functionality is not supported for directory buckets. 
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The object key of the newly created object. + Key *string `min:"1" type:"string"` + + // The URI that identifies the newly created object. + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CompleteMultipartUploadOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created object, in case the bucket has versioning + // turned on. + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompleteMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompleteMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput { + s.Bucket = &v + return s +} + +func (s *CompleteMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CompleteMultipartUploadOutput) SetBucketKeyEnabled(v bool) *CompleteMultipartUploadOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *CompleteMultipartUploadOutput) SetChecksumCRC32(v string) *CompleteMultipartUploadOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *CompleteMultipartUploadOutput) SetChecksumCRC32C(v string) *CompleteMultipartUploadOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. 
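+
+// Editor's note: an illustrative sketch (not generated code) of reading the
+// response fields documented above after completing an upload; svc and input
+// are assumed to be an *s3.S3 client and a validated input from the caller.
+//
+//	out, err := svc.CompleteMultipartUpload(input)
+//	if err != nil {
+//		return err
+//	}
+//	// Location is the URI of the new object, ETag is an opaque entity tag,
+//	// and VersionId is non-nil only on versioned buckets.
+//	fmt.Println(aws.StringValue(out.Location), aws.StringValue(out.ETag))
+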
+func (s *CompleteMultipartUploadOutput) SetChecksumSHA1(v string) *CompleteMultipartUploadOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *CompleteMultipartUploadOutput) SetChecksumSHA256(v string) *CompleteMultipartUploadOutput { + s.ChecksumSHA256 = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput { + s.Expiration = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput { + s.Key = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput { + s.Location = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput { + s.VersionId = &v + return s +} + +// The container for the completed multipart upload details. +type CompletedMultipartUpload struct { + _ struct{} `type:"structure"` + + // Array of CompletedPart data types. + // + // If you do not supply a valid Part with your request, the service sends back + // an HTTP 400 response. + Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompletedMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompletedMultipartUpload) GoString() string { + return s.String() +} + +// SetParts sets the Parts field's value. +func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload { + s.Parts = v + return s +} + +// Details of the parts that were uploaded. +type CompletedPart struct { + _ struct{} `type:"structure"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. 
For more information
+	// about how checksums are calculated with multipart uploads, see Checking object
+	// integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+	// in the Amazon S3 User Guide.
+	ChecksumCRC32 *string `type:"string"`
+
+	// The base64-encoded, 32-bit CRC32C checksum of the object. This will only
+	// be present if it was uploaded with the object. When you use an API operation
+	// on an object that was uploaded using multipart uploads, this value may not
+	// be a direct checksum value of the full object. Instead, it's a calculation
+	// based on the checksum values of each individual part. For more information
+	// about how checksums are calculated with multipart uploads, see Checking object
+	// integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+	// in the Amazon S3 User Guide.
+	ChecksumCRC32C *string `type:"string"`
+
+	// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+	// present if it was uploaded with the object. When you use the API operation
+	// on an object that was uploaded using multipart uploads, this value may not
+	// be a direct checksum value of the full object. Instead, it's a calculation
+	// based on the checksum values of each individual part. For more information
+	// about how checksums are calculated with multipart uploads, see Checking object
+	// integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+	// in the Amazon S3 User Guide.
+	ChecksumSHA1 *string `type:"string"`
+
+	// The base64-encoded, 256-bit SHA-256 digest of the object. This will only
+	// be present if it was uploaded with the object. When you use an API operation
+	// on an object that was uploaded using multipart uploads, this value may not
+	// be a direct checksum value of the full object. Instead, it's a calculation
+	// based on the checksum values of each individual part. For more information
+	// about how checksums are calculated with multipart uploads, see Checking object
+	// integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+	// in the Amazon S3 User Guide.
+	ChecksumSHA256 *string `type:"string"`
+
+	// Entity tag returned when the part was uploaded.
+	ETag *string `type:"string"`
+
+	// Part number that identifies the part. This is a positive integer between
+	// 1 and 10,000.
+	//
+	//    * General purpose buckets - In CompleteMultipartUpload, when an additional
+	//    checksum (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1,
+	//    or x-amz-checksum-sha256) is applied to each part, the PartNumber must
+	//    start at 1 and the part numbers must be consecutive. Otherwise, Amazon
+	//    S3 generates an HTTP 400 Bad Request status code and an InvalidPartOrder
+	//    error code.
+	//
+	//    * Directory buckets - In CompleteMultipartUpload, the PartNumber must
+	//    start at 1 and the part numbers must be consecutive.
+	PartNumber *int64 `type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CompletedPart) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CompletedPart) GoString() string {
+	return s.String()
+}
+
+// SetChecksumCRC32 sets the ChecksumCRC32 field's value.
+func (s *CompletedPart) SetChecksumCRC32(v string) *CompletedPart {
+	s.ChecksumCRC32 = &v
+	return s
+}
+
+// SetChecksumCRC32C sets the ChecksumCRC32C field's value.
+func (s *CompletedPart) SetChecksumCRC32C(v string) *CompletedPart {
+	s.ChecksumCRC32C = &v
+	return s
+}
+
+// SetChecksumSHA1 sets the ChecksumSHA1 field's value.
+func (s *CompletedPart) SetChecksumSHA1(v string) *CompletedPart {
+	s.ChecksumSHA1 = &v
+	return s
+}
+
+// SetChecksumSHA256 sets the ChecksumSHA256 field's value.
+func (s *CompletedPart) SetChecksumSHA256(v string) *CompletedPart {
+	s.ChecksumSHA256 = &v
+	return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *CompletedPart) SetETag(v string) *CompletedPart {
+	s.ETag = &v
+	return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
+	s.PartNumber = &v
+	return s
+}
+
+// A container for describing a condition that must be met for the specified
+// redirect to apply. For example: 1. If a request is for pages in the /docs
+// folder, redirect to the /documents folder. 2. If a request results in an
+// HTTP 4xx error, redirect the request to another host where you might process
+// the error.
+type Condition struct {
+	_ struct{} `type:"structure"`
+
+	// The HTTP error code when the redirect is applied. In the event of an error,
+	// if the error code equals this value, then the specified redirect is applied.
+	// Required when parent element Condition is specified and sibling KeyPrefixEquals
+	// is not specified. If both are specified, then both must be true for the redirect
+	// to be applied.
+	HttpErrorCodeReturnedEquals *string `type:"string"`
+
+	// The object key name prefix when the redirect is applied. For example, to
+	// redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
+	// To redirect requests for all pages with the prefix docs/, the key prefix will
+	// be docs/, which identifies all objects in the docs/ folder. Required when
+	// the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
+	// is not specified. If both conditions are specified, both must be true for
+	// the redirect to be applied.
+	//
+	// Replacement must be made for object keys containing special characters (such
+	// as carriage returns) when using XML requests. For more information, see XML
+	// related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+	KeyPrefixEquals *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Condition) String() string {
+	return awsutil.Prettify(s)
+}
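+
+// Editor's note: the sketch below is illustrative and not part of the vendored
+// SDK surface. It pairs the Condition documented above with a Redirect inside
+// a hypothetical bucket-website routing rule.
+//
+//	rule := &s3.RoutingRule{
+//		// Apply the redirect only to keys under the docs/ prefix ...
+//		Condition: &s3.Condition{KeyPrefixEquals: aws.String("docs/")},
+//		// ... and rewrite that prefix to documents/ on redirect.
+//		Redirect: &s3.Redirect{ReplaceKeyPrefixWith: aws.String("documents/")},
+//	}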
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Condition) GoString() string {
+	return s.String()
+}
+
+// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value.
+func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition {
+	s.HttpErrorCodeReturnedEquals = &v
+	return s
+}
+
+// SetKeyPrefixEquals sets the KeyPrefixEquals field's value.
+func (s *Condition) SetKeyPrefixEquals(v string) *Condition {
+	s.KeyPrefixEquals = &v
+	return s
+}
+
+type ContinuationEvent struct {
+	_ struct{} `locationName:"ContinuationEvent" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ContinuationEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ContinuationEvent) GoString() string {
+	return s.String()
+}
+
+// The ContinuationEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *ContinuationEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the ContinuationEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *ContinuationEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *ContinuationEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+	msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+	return msg, err
+}
+
+type CopyObjectInput struct {
+	_ struct{} `locationName:"CopyObjectRequest" type:"structure"`
+
+	// The canned access control list (ACL) to apply to the object.
+	//
+	// When you copy an object, the ACL metadata is not preserved and is set to
+	// private by default. Only the owner has full access control. To override the
+	// default ACL setting, specify a new ACL when you generate a copy request.
+	// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
+	//
+	// If the destination bucket that you're copying objects to uses the bucket
+	// owner enforced setting for S3 Object Ownership, ACLs are disabled and no
+	// longer affect permissions. Buckets that use this setting only accept PUT
+	// requests that don't specify an ACL or PUT requests that specify bucket owner
+	// full control ACLs, such as the bucket-owner-full-control canned ACL or an
+	// equivalent form of this ACL expressed in the XML format. For more information,
+	// see Controlling ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+	// in the Amazon S3 User Guide.
+	//
+	//    * If your destination bucket uses the bucket owner enforced setting for
+	//    Object Ownership, all objects written to the bucket by any account will
+	//    be owned by the bucket owner.
+	//
+	//    * This functionality is not supported for directory buckets.
+ // + // * This functionality is not supported for Amazon S3 on Outposts. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // The name of the destination bucket. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the + // object. + // + // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for + // object encryption with SSE-KMS. Specifying this header with a COPY action + // doesn’t affect bucket-level settings for S3 Bucket Key. + // + // For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies the caching behavior along the request/reply chain. 
+	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+	// Indicates the algorithm that you want Amazon S3 to use to create the checksum
+	// for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// When you copy an object, if the source object has a checksum, that checksum
+	// value will be copied to the new object by default. If the CopyObject request
+	// does not include this x-amz-checksum-algorithm header, the checksum algorithm
+	// will be copied from the source object to the destination object (if it's
+	// present on the source object). You can optionally specify a different checksum
+	// algorithm to use with the x-amz-checksum-algorithm header. Unrecognized or
+	// unsupported values will respond with the HTTP status code 400 Bad Request.
+	//
+	// For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the
+	// default checksum algorithm that's used for performance.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// Specifies presentational information for the object. Indicates whether an
+	// object should be displayed in a web browser or downloaded as a file. It allows
+	// specifying the desired filename for the downloaded file.
+	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+	// Specifies what content encodings have been applied to the object and thus
+	// what decoding mechanisms must be applied to obtain the media-type referenced
+	// by the Content-Type header field.
+	//
+	// For directory buckets, only the aws-chunked value is supported in this header
+	// field.
+	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+	// The language the content is in.
+	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+	// A standard MIME type that describes the format of the object data.
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	// Specifies the source object for the copy operation. The source object can
+	// be up to 5 GB. If the source object is an object that was uploaded by using
+	// a multipart upload, the object copy will be a single part object after the
+	// source object is copied to the destination bucket.
+	//
+	// You specify the value of the copy source in one of two formats, depending
+	// on whether you want to access the source object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html):
+	//
+	//    * For objects not accessed through an access point, specify the name of
+	//    the source bucket and the key of the source object, separated by a slash
+	//    (/). For example, to copy the object reports/january.pdf from the general
+	//    purpose bucket awsexamplebucket, use awsexamplebucket/reports/january.pdf.
+	//    The value must be URL-encoded. To copy the object reports/january.pdf
+	//    from the directory bucket awsexamplebucket--use1-az5--x-s3, use awsexamplebucket--use1-az5--x-s3/reports/january.pdf.
+	//    The value must be URL-encoded.
+	//
+	//    * For objects accessed through access points, specify the Amazon Resource
+	//    Name (ARN) of the object as accessed through the access point, in the
+	//    format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>.
+	//    For example, to copy the object reports/january.pdf through access point
+	//    my-access-point owned by account 123456789012 in Region us-west-2, use
+	//    the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf.
+	//    The value must be URL-encoded. Amazon S3 supports copy operations using
+	//    Access points only when the source and destination buckets are in the
+	//    same Amazon Web Services Region. Access points are not supported by directory
+	//    buckets. Alternatively, for objects accessed through Amazon S3 on Outposts,
+	//    specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>.
+	//    For example, to copy the object reports/january.pdf through outpost my-outpost
+	//    owned by account 123456789012 in Region us-west-2, use the URL encoding
+	//    of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
+	//    The value must be URL-encoded.
+	//
+	// If your source bucket versioning is enabled, the x-amz-copy-source header
+	// by default identifies the current version of an object to copy. If the current
+	// version is a delete marker, Amazon S3 behaves as if the object was deleted.
+	// To copy a different version, use the versionId query parameter. Specifically,
+	// append ?versionId= to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
+	// If you don't specify a version ID, Amazon S3 copies the latest version of
+	// the source object.
+	//
+	// If you enable versioning on the destination bucket, Amazon S3 generates a
+	// unique version ID for the copied object. This version ID is different from
+	// the version ID of the source object. Amazon S3 returns the version ID of
+	// the copied object in the x-amz-version-id response header in the response.
+	//
+	// If you do not enable versioning or suspend it on the destination bucket,
+	// the version ID that Amazon S3 generates in the x-amz-version-id response
+	// header is always null.
+	//
+	// Directory buckets - S3 Versioning isn't enabled or supported for directory
+	// buckets.
+	//
+	// CopySource is a required field
+	CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+	// Copies the object if its entity tag (ETag) matches the specified tag.
+	//
+	// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+	// headers are present in the request and evaluate as follows, Amazon S3 returns
+	// 200 OK and copies the data:
+	//
+	//    * x-amz-copy-source-if-match condition evaluates to true
+	//
+	//    * x-amz-copy-source-if-unmodified-since condition evaluates to false
+	CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+	// Copies the object if it has been modified since the specified time.
+	//
+	// If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since
+	// headers are present in the request and evaluate as follows, Amazon S3 returns
+	// the 412 Precondition Failed response code:
+	//
+	//    * x-amz-copy-source-if-none-match condition evaluates to false
+	//
+	//    * x-amz-copy-source-if-modified-since condition evaluates to true
+	CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
+
+	// Copies the object if its entity tag (ETag) is different from the specified
+	// ETag.
+ // + // If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // the 412 Precondition Failed response code: + // + // * x-amz-copy-source-if-none-match condition evaluates to false + // + // * x-amz-copy-source-if-modified-since condition evaluates to true + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + // + // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // 200 OK and copies the data: + // + // * x-amz-copy-source-if-match condition evaluates to true + // + // * x-amz-copy-source-if-unmodified-since condition evaluates to false + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` + + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you + // must provide the necessary encryption information in your request so that + // Amazon S3 can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be the + // same one that was used when the source object was created. + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you + // must provide the necessary encryption information in your request so that + // Amazon S3 can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. + // + // CopySourceSSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you + // must provide the necessary encryption information in your request so that + // Amazon S3 can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // The account ID of the expected destination bucket owner. If the account ID + // that you provide does not match the actual owner of the destination bucket, + // the request fails with the HTTP status code 403 Forbidden (access denied). 
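+
+// Editor's note: an illustrative sketch (not generated code) of the
+// conditional-copy headers documented above. The copy proceeds only if the
+// source ETag still matches and the object is unchanged since the given time;
+// the ETag and timestamp are hypothetical.
+//
+//	input.SetCopySourceIfMatch(`"9b2cf535f27731c974343645a3985328"`).
+//		SetCopySourceIfUnmodifiedSince(time.Date(2024, time.January, 1, 0, 0, 0, 0, time.UTC))
+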
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The account ID of the expected source bucket owner. If the account ID that + // you provide does not match the actual owner of the source bucket, the request + // fails with the HTTP status code 403 Forbidden (access denied). + ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // The key of the destination object. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Specifies whether the metadata is copied from the source object or replaced + // with metadata that's provided in the request. When copying an object, you + // can preserve all metadata (the default) or specify new metadata. If this + // header isn’t specified, COPY is the default behavior. + // + // General purpose bucket - For general purpose buckets, when you grant permissions, + // you can use the s3:x-amz-metadata-directive condition key to enforce certain + // metadata behavior when objects are uploaded. For more information, see Amazon + // S3 condition key examples (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) + // in the Amazon S3 User Guide. + // + // x-amz-website-redirect-location is unique to each object and is not copied + // when using the x-amz-metadata-directive header. To copy the value, you must + // specify x-amz-website-redirect-location in the request header. + MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` + + // Specifies whether you want to apply a legal hold to the object copy. + // + // This functionality is not supported for directory buckets. 
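+
+// Editor's note: an illustrative sketch (not generated code) of the
+// x-amz-metadata-directive behavior described above: REPLACE discards the
+// source object's metadata in favor of the map supplied on the request,
+// while the default COPY preserves it. The metadata key is hypothetical.
+//
+//	input.SetMetadataDirective(s3.MetadataDirectiveReplace).
+//		SetMetadata(map[string]*string{
+//			"reviewed-by": aws.String("storage-team"),
+//		})
+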
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode that you want to apply to the object copy. + // + // This functionality is not supported for directory buckets. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when you want the Object Lock of the object copy to expire. + // + // This functionality is not supported for directory buckets. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // When you perform a CopyObject operation, if you want to use a different type + // of encryption setting for the target object, you can specify appropriate + // encryption-related headers to encrypt the target object with an Amazon S3 + // managed key, a KMS key, or a customer-provided key. If the encryption setting + // in your request is different from the default encryption configuration of + // the destination bucket, the encryption setting in your request takes precedence. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded. Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. 
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the Amazon Web Services KMS Encryption Context to use for object + // encryption. The value of this header is a base64-encoded UTF-8 string holding + // JSON with the encryption context key-value pairs. This value must be explicitly + // added to specify encryption context for CopyObject requests. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object encryption. + // All GET and PUT requests for an object protected by KMS will fail if they're + // not made via SSL or using SigV4. For information about configuring any of + // the officially supported Amazon Web Services SDKs and Amazon Web Services + // CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // in the Amazon S3 User Guide. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms, aws:kms:dsse). Unrecognized or unsupported + // values won’t write a destination object and will receive a 400 Bad Request + // response. + // + // Amazon S3 automatically encrypts all new objects that are copied to an S3 + // bucket. When copying an object, if you don't specify encryption information + // in your copy request, the encryption setting of the target object is set + // to the default encryption configuration of the destination bucket. By default, + // all buckets have a base level of encryption configuration that uses server-side + // encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket + // has a default encryption configuration that uses server-side encryption with + // Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption + // with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with + // customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding + // KMS key, or a customer-provided key to encrypt the target object copy. + // + // When you perform a CopyObject operation, if you want to use a different type + // of encryption setting for the target object, you can specify appropriate + // encryption-related headers to encrypt the target object with an Amazon S3 + // managed key, a KMS key, or a customer-provided key. If the encryption setting + // in your request is different from the default encryption configuration of + // the destination bucket, the encryption setting in your request takes precedence. 
+	//
+	// With server-side encryption, Amazon S3 encrypts your data as it writes your
+	// data to disks in its data centers and decrypts the data when you access it.
+	// For more information about server-side encryption, see Using Server-Side
+	// Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
+	// in the Amazon S3 User Guide.
+	//
+	// For directory buckets, only server-side encryption with Amazon S3 managed
+	// keys (SSE-S3) (AES256) is supported.
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+	// If the x-amz-storage-class header is not used, the copied object will be
+	// stored in the STANDARD Storage Class by default. The STANDARD storage class
+	// provides high durability and high availability. Depending on performance
+	// needs, you can specify a different Storage Class.
+	//
+	//    * Directory buckets - For directory buckets, only the S3 Express One Zone
+	//    storage class is supported to store newly created objects. Unsupported
+	//    storage class values won't write a destination object and will respond
+	//    with the HTTP status code 400 Bad Request.
+	//
+	//    * Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage
+	//    Class.
+	//
+	// You can use the CopyObject action to change the storage class of an object
+	// that is already stored in Amazon S3 by using the x-amz-storage-class header.
+	// For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Before using an object as a source object for the copy operation, you must
+	// restore a copy of it if it meets any of the following conditions:
+	//
+	//    * The storage class of the source object is GLACIER or DEEP_ARCHIVE.
+	//
+	//    * The storage class of the source object is INTELLIGENT_TIERING and its
+	//    S3 Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition)
+	//    is Archive Access or Deep Archive Access.
+	//
+	// For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html)
+	// and Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html)
+	// in the Amazon S3 User Guide.
+	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+	// The tag-set for the object copy in the destination bucket. This value must
+	// be used in conjunction with the x-amz-tagging-directive if you choose REPLACE
+	// for the x-amz-tagging-directive. If you choose COPY for the x-amz-tagging-directive,
+	// you don't need to set the x-amz-tagging header, because the tag-set will
+	// be copied from the source object directly. The tag-set must be encoded as
+	// URL Query parameters.
+	//
+	// The default value is the empty value.
+	//
+	// Directory buckets - For directory buckets in a CopyObject operation, only
+	// the empty tag-set is supported. Any requests that attempt to write non-empty
+	// tags into directory buckets will receive a 501 Not Implemented status code.
+	// When the destination bucket is a directory bucket, you will receive a 501
+	// Not Implemented response in any of the following situations:
+	//
+	//    * When you attempt to COPY the tag-set from an S3 source object that has
+	//    non-empty tags.
+ // + // * When you attempt to REPLACE the tag-set of a source object and set a + // non-empty value to x-amz-tagging. + // + // * When you don't set the x-amz-tagging-directive header and the source + // object has non-empty tags. This is because the default value of x-amz-tagging-directive + // is COPY. + // + // Because only the empty tag-set is supported for directory buckets in a CopyObject + // operation, the following situations are allowed: + // + // * When you attempt to COPY the tag-set from a directory bucket source + // object that has no tags to a general purpose bucket. It copies an empty + // tag-set to the destination object. + // + // * When you attempt to REPLACE the tag-set of a directory bucket source + // object and set the x-amz-tagging value of the directory bucket destination + // object to empty. + // + // * When you attempt to REPLACE the tag-set of a general purpose bucket + // source object that has non-empty tags and set the x-amz-tagging value + // of the directory bucket destination object to empty. + // + // * When you attempt to REPLACE the tag-set of a directory bucket source + // object and don't set the x-amz-tagging value of the directory bucket destination + // object. This is because the default value of x-amz-tagging is the empty + // value. + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // Specifies whether the object tag-set is copied from the source object or + // replaced with the tag-set that's provided in the request. + // + // The default value is COPY. + // + // Directory buckets - For directory buckets in a CopyObject operation, only + // the empty tag-set is supported. Any requests that attempt to write non-empty + // tags into directory buckets will receive a 501 Not Implemented status code. + // When the destination bucket is a directory bucket, you will receive a 501 + // Not Implemented response in any of the following situations: + // + // * When you attempt to COPY the tag-set from an S3 source object that has + // non-empty tags. + // + // * When you attempt to REPLACE the tag-set of a source object and set a + // non-empty value to x-amz-tagging. + // + // * When you don't set the x-amz-tagging-directive header and the source + // object has non-empty tags. This is because the default value of x-amz-tagging-directive + // is COPY. + // + // Because only the empty tag-set is supported for directory buckets in a CopyObject + // operation, the following situations are allowed: + // + // * When you attempt to COPY the tag-set from a directory bucket source + // object that has no tags to a general purpose bucket. It copies an empty + // tag-set to the destination object. + // + // * When you attempt to REPLACE the tag-set of a directory bucket source + // object and set the x-amz-tagging value of the directory bucket destination + // object to empty. + // + // * When you attempt to REPLACE the tag-set of a general purpose bucket + // source object that has non-empty tags and set the x-amz-tagging value + // of the directory bucket destination object to empty. + // + // * When you attempt to REPLACE the tag-set of a directory bucket source + // object and don't set the x-amz-tagging value of the directory bucket destination + // object. This is because the default value of x-amz-tagging is the empty + // value. 
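+
+// Editor's note: an illustrative sketch (not generated code) of replacing the
+// tag-set on the object copy, per the x-amz-tagging/x-amz-tagging-directive
+// contract described above. The tag keys are hypothetical; note the tag-set
+// must be encoded as URL query parameters.
+//
+//	input.SetTagging("project=blue&classification=internal").
+//		SetTaggingDirective(s3.TaggingDirectiveReplace)
+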
+ TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"` + + // If the destination bucket is configured as a website, redirects requests + // for this object copy to another object in the same bucket or to an external + // URL. Amazon S3 stores the value of this header in the object metadata. This + // value is unique to each object and is not copied when using the x-amz-metadata-directive + // header. Instead, you may opt to provide this header in combination with the + // x-amz-metadata-directive header. + // + // This functionality is not supported for directory buckets. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput { + s.Bucket = &v + return s +} + +func (s *CopyObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CopyObjectInput) SetBucketKeyEnabled(v bool) *CopyObjectInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { + s.CacheControl = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *CopyObjectInput) SetChecksumAlgorithm(v string) *CopyObjectInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. 
+func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput { + s.ContentType = &v + return s +} + +// SetCopySource sets the CopySource field's value. +func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput { + s.CopySourceSSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *CopyObjectInput) SetExpectedBucketOwner(v string) *CopyObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpectedSourceBucketOwner sets the ExpectedSourceBucketOwner field's value. +func (s *CopyObjectInput) SetExpectedSourceBucketOwner(v string) *CopyObjectInput { + s.ExpectedSourceBucketOwner = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. 
+func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput { + s.Metadata = v + return s +} + +// SetMetadataDirective sets the MetadataDirective field's value. +func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { + s.MetadataDirective = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CopyObjectInput) SetObjectLockLegalHoldStatus(v string) *CopyObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *CopyObjectInput) SetObjectLockMode(v string) *CopyObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *CopyObjectInput) SetObjectLockRetainUntilDate(v time.Time) *CopyObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectInput) SetSSEKMSEncryptionContext(v string) *CopyObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput { + s.Tagging = &v + return s +} + +// SetTaggingDirective sets the TaggingDirective field's value. +func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput { + s.TaggingDirective = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +func (s *CopyObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CopyObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CopyObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type CopyObjectOutput struct { + _ struct{} `type:"structure" payload:"CopyObjectResult"` + + // Indicates whether the copied object uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Container for all response elements. + CopyObjectResult *CopyObjectResult `type:"structure"` + + // Version ID of the source object that was copied. + // + // This functionality is not supported when the source object is in a directory + // bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. + // + // This functionality is not supported for directory buckets. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide the round-trip message integrity + // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, indicates the Amazon Web Services KMS Encryption Context to use + // for object encryption. The value of this header is a base64-encoded UTF-8 + // string holding JSON with the encryption context key-value pairs. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectOutput's + // String and GoString methods. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectOutput's + // String and GoString methods. 
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created copy. + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyObjectOutput) GoString() string { + return s.String() +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CopyObjectOutput) SetBucketKeyEnabled(v bool) *CopyObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCopyObjectResult sets the CopyObjectResult field's value. +func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput { + s.CopyObjectResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput { + s.CopySourceVersionId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectOutput) SetSSEKMSEncryptionContext(v string) *CopyObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { + s.VersionId = &v + return s +} + +// Container for all response elements. 
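+//
+// Editor's note: an illustrative sketch, not generated SDK documentation.
+// Callers typically read the new copy's entity tag and timestamp from this
+// element of the CopyObjectOutput:
+//
+//	var out CopyObjectOutput // as returned by a CopyObject call
+//	if res := out.CopyObjectResult; res != nil && res.ETag != nil {
+//		_ = *res.ETag // ETag of the newly written copy
+//	}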
+type CopyObjectResult struct { + _ struct{} `type:"structure"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + + // Returns the ETag of the new object. The ETag reflects only changes to the + // contents of an object, not its metadata. + ETag *string `type:"string"` + + // Creation date of the object. + LastModified *time.Time `type:"timestamp"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyObjectResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyObjectResult) GoString() string { + return s.String() +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *CopyObjectResult) SetChecksumCRC32(v string) *CopyObjectResult { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *CopyObjectResult) SetChecksumCRC32C(v string) *CopyObjectResult { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *CopyObjectResult) SetChecksumSHA1(v string) *CopyObjectResult { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *CopyObjectResult) SetChecksumSHA256(v string) *CopyObjectResult { + s.ChecksumSHA256 = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { + s.LastModified = &v + return s +} + +// Container for all response elements. +type CopyPartResult struct { + _ struct{} `type:"structure"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. 
This will only be
+ // present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32 *string `type:"string"`
+
+ // The base64-encoded, 32-bit CRC32C checksum of the object. This will only
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32C *string `type:"string"`
+
+ // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ // present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA1 *string `type:"string"`
+
+ // The base64-encoded, 256-bit SHA-256 digest of the object. This will only
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA256 *string `type:"string"`
+
+ // Entity tag of the object.
+ ETag *string `type:"string"`
+
+ // Date and time at which the object was uploaded.
+ LastModified *time.Time `type:"timestamp"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CopyPartResult) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
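+//
+// Editor's note: an illustrative sketch, not generated SDK documentation;
+// the ETag value is hypothetical. Because sensitive members are redacted,
+// the representation is safe to log:
+//
+//	res := &CopyPartResult{}
+//	res.SetETag("\"9b2cf535f27731c974343645a3985328\"")
+//	fmt.Println(res.GoString())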
+func (s CopyPartResult) GoString() string { + return s.String() +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *CopyPartResult) SetChecksumCRC32(v string) *CopyPartResult { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *CopyPartResult) SetChecksumCRC32C(v string) *CopyPartResult { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *CopyPartResult) SetChecksumSHA1(v string) *CopyPartResult { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *CopyPartResult) SetChecksumSHA256(v string) *CopyPartResult { + s.ChecksumSHA256 = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *CopyPartResult) SetETag(v string) *CopyPartResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { + s.LastModified = &v + return s +} + +// The configuration information for the bucket. +type CreateBucketConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the information about the bucket that will be created. + // + // This functionality is only supported by directory buckets. + Bucket *BucketInfo `type:"structure"` + + // Specifies the location where the bucket will be created. + // + // For directory buckets, the location type is Availability Zone. + // + // This functionality is only supported by directory buckets. + Location *LocationInfo `type:"structure"` + + // Specifies the Region where the bucket will be created. You might choose a + // Region to optimize latency, minimize costs, or address regulatory requirements. + // For example, if you reside in Europe, you will probably find it advantageous + // to create buckets in the Europe (Ireland) Region. For more information, see + // Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + // in the Amazon S3 User Guide. + // + // If you don't specify a Region, the bucket is created in the US East (N. Virginia) + // Region (us-east-1) by default. + // + // This functionality is not supported for directory buckets. + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBucketConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBucketConfiguration) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *CreateBucketConfiguration) SetBucket(v *BucketInfo) *CreateBucketConfiguration { + s.Bucket = v + return s +} + +func (s *CreateBucketConfiguration) getBucket() (v *BucketInfo) { + return s.Bucket +} + +// SetLocation sets the Location field's value. +func (s *CreateBucketConfiguration) SetLocation(v *LocationInfo) *CreateBucketConfiguration { + s.Location = v + return s +} + +// SetLocationConstraint sets the LocationConstraint field's value. 
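+//
+// Editor's note: an illustrative sketch, not generated SDK documentation.
+// For a general purpose bucket outside us-east-1, the target Region is
+// supplied through the location constraint:
+//
+//	cfg := &CreateBucketConfiguration{}
+//	cfg.SetLocationConstraint("eu-west-1")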
+func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration { + s.LocationConstraint = &v + return s +} + +type CreateBucketInput struct { + _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"` + + // The canned ACL to apply to the bucket. + // + // This functionality is not supported for directory buckets. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + // The name of the bucket to create. + // + // General purpose buckets - For information about bucket naming restrictions, + // see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) + // in the Amazon S3 User Guide. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). + // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The configuration information for the bucket. + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + // + // This functionality is not supported for directory buckets. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + // + // This functionality is not supported for directory buckets. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + // + // This functionality is not supported for directory buckets. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions + // and overwrites of those objects. + // + // This functionality is not supported for directory buckets. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + // + // This functionality is not supported for directory buckets. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + // + // This functionality is not supported for directory buckets. + ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` + + // The container element for object ownership for a bucket's ownership controls. 
+ // + // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to + // the bucket owner if the objects are uploaded with the bucket-owner-full-control + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. + // + // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer + // affect permissions. The bucket owner automatically owns and has full control + // over every object in the bucket. The bucket only accepts PUT requests that + // don't specify an ACL or specify bucket owner full control ACLs (such as the + // predefined bucket-owner-full-control canned ACL or a custom ACL in XML format + // that grants the same permissions). + // + // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled. + // We recommend keeping ACLs disabled, except in uncommon use cases where you + // must control access for each object individually. For more information about + // S3 Object Ownership, see Controlling ownership of objects and disabling ACLs + // for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // use the bucket owner enforced setting for S3 Object Ownership. + ObjectOwnership *string `location:"header" locationName:"x-amz-object-ownership" type:"string" enum:"ObjectOwnership"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { + s.Bucket = &v + return s +} + +func (s *CreateBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. +func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { + s.CreateBucketConfiguration = v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. 
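+//
+// Editor's note: an illustrative sketch, not generated SDK documentation;
+// the bucket name is hypothetical. Bucket is the only required member, and
+// the Region configuration is attached as a separate structure:
+//
+//	in := &CreateBucketInput{}
+//	in.SetBucket("example-bucket").
+//		SetCreateBucketConfiguration(
+//			(&CreateBucketConfiguration{}).SetLocationConstraint("eu-west-1"))
+//	if err := in.Validate(); err != nil {
+//		// Bucket was nil or empty.
+//	}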
+func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { + s.GrantWriteACP = &v + return s +} + +// SetObjectLockEnabledForBucket sets the ObjectLockEnabledForBucket field's value. +func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketInput { + s.ObjectLockEnabledForBucket = &v + return s +} + +// SetObjectOwnership sets the ObjectOwnership field's value. +func (s *CreateBucketInput) SetObjectOwnership(v string) *CreateBucketInput { + s.ObjectOwnership = &v + return s +} + +type CreateBucketOutput struct { + _ struct{} `type:"structure"` + + // A forward slash followed by the name of the bucket. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBucketOutput) GoString() string { + return s.String() +} + +// SetLocation sets the Location field's value. +func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { + s.Location = &v + return s +} + +type CreateMultipartUploadInput struct { + _ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"` + + // The canned ACL to apply to the object. Amazon S3 supports a set of predefined + // ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees + // and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // in the Amazon S3 User Guide. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can grant access permissions to individual + // Amazon Web Services accounts or to predefined groups defined by Amazon S3. + // These permissions are then added to the access control list (ACL) on the + // new object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). + // One way to grant the permissions using the request headers is to specify + // a canned ACL with the x-amz-acl request header. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // The name of the bucket where the multipart upload is initiated and where + // the object is uploaded. 
+ // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for + // object encryption with SSE-KMS. + // + // Specifying this header with an object action doesn’t affect bucket-level + // settings for S3 Bucket Key. + // + // This functionality is not supported for directory buckets. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Indicates the algorithm that you want Amazon S3 to use to create the checksum + // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // Specifies presentational information for the object. 
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + // + // For directory buckets, only the aws-chunked value is supported in this header + // field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language that the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Specify access permissions explicitly to give the grantee READ, READ_ACP, + // and WRITE_ACP permissions on the object. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header + // maps to specific permissions that Amazon S3 supports in an ACL. For more + // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // + // * id – if the value specified is the canonical user ID of an Amazon + // Web Services account + // + // * uri – if you are granting permissions to a predefined group + // + // * emailAddress – if the value specified is the email address of an Amazon + // Web Services account Using email addresses to specify a grantee is only + // supported in the following Amazon Web Services Regions: US East (N. Virginia) + // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia + // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São + // Paulo) For a list of all the Amazon S3 supported Regions and endpoints, + // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data + // and its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Specify access permissions explicitly to allow grantee to read the object + // data and its metadata. + // + // By default, all objects are private. Only the owner has full access control. 
+ // When uploading an object, you can use this header to explicitly grant access
+ // permissions to specific Amazon Web Services accounts or groups. This header
+ // maps to specific permissions that Amazon S3 supports in an ACL. For more
+ // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+ // in the Amazon S3 User Guide.
+ //
+ // You specify each grantee as a type=value pair, where the type is one of the
+ // following:
+ //
+ // * id – if the value specified is the canonical user ID of an Amazon
+ // Web Services account
+ //
+ // * uri – if you are granting permissions to a predefined group
+ //
+ // * emailAddress – if the value specified is the email address of an Amazon
+ // Web Services account Using email addresses to specify a grantee is only
+ // supported in the following Amazon Web Services Regions: US East (N. Virginia)
+ // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia
+ // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São
+ // Paulo) For a list of all the Amazon S3 supported Regions and endpoints,
+ // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+ // in the Amazon Web Services General Reference.
+ //
+ // For example, the following x-amz-grant-read header grants the Amazon Web
+ // Services accounts identified by account IDs permissions to read object data
+ // and its metadata:
+ //
+ // x-amz-grant-read: id="11112222333", id="444455556666"
+ //
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Specify access permissions explicitly to allow grantee to read the object
+ // ACL.
+ //
+ // By default, all objects are private. Only the owner has full access control.
+ // When uploading an object, you can use this header to explicitly grant access
+ // permissions to specific Amazon Web Services accounts or groups. This header
+ // maps to specific permissions that Amazon S3 supports in an ACL. For more
+ // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+ // in the Amazon S3 User Guide.
+ //
+ // You specify each grantee as a type=value pair, where the type is one of the
+ // following:
+ //
+ // * id – if the value specified is the canonical user ID of an Amazon
+ // Web Services account
+ //
+ // * uri – if you are granting permissions to a predefined group
+ //
+ // * emailAddress – if the value specified is the email address of an Amazon
+ // Web Services account Using email addresses to specify a grantee is only
+ // supported in the following Amazon Web Services Regions: US East (N. Virginia)
+ // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia
+ // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São
+ // Paulo) For a list of all the Amazon S3 supported Regions and endpoints,
+ // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+ // in the Amazon Web Services General Reference.
+ //
+ // For example, the following x-amz-grant-read header grants the Amazon Web
+ // Services accounts identified by account IDs permissions to read object data
+ // and its metadata:
+ //
+ // x-amz-grant-read: id="11112222333", id="444455556666"
+ //
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Specify access permissions explicitly to allow grantee to write the ACL
+ // for the applicable object.
+ //
+ // By default, all objects are private. Only the owner has full access control.
+ // When uploading an object, you can use this header to explicitly grant access
+ // permissions to specific Amazon Web Services accounts or groups. This header
+ // maps to specific permissions that Amazon S3 supports in an ACL. For more
+ // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+ // in the Amazon S3 User Guide.
+ //
+ // You specify each grantee as a type=value pair, where the type is one of the
+ // following:
+ //
+ // * id – if the value specified is the canonical user ID of an Amazon
+ // Web Services account
+ //
+ // * uri – if you are granting permissions to a predefined group
+ //
+ // * emailAddress – if the value specified is the email address of an Amazon
+ // Web Services account Using email addresses to specify a grantee is only
+ // supported in the following Amazon Web Services Regions: US East (N. Virginia)
+ // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia
+ // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São
+ // Paulo) For a list of all the Amazon S3 supported Regions and endpoints,
+ // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+ // in the Amazon Web Services General Reference.
+ //
+ // For example, the following x-amz-grant-read header grants the Amazon Web
+ // Services accounts identified by account IDs permissions to read object data
+ // and its metadata:
+ //
+ // x-amz-grant-read: id="11112222333", id="444455556666"
+ //
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Object key for which the multipart upload is to be initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Specifies whether you want to apply a legal hold to the uploaded object.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // Specifies the Object Lock mode that you want to apply to the uploaded object.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // Specifies the date and time when you want the Object Lock to expire.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + // + // This functionality is not supported for directory buckets. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the customer-provided encryption key + // according to RFC 1321. Amazon S3 uses this header for a message integrity + // check to ensure that the encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the Amazon Web Services KMS Encryption Context to use for object + // encryption. The value of this header is a base64-encoded UTF-8 string holding + // JSON with the encryption context key-value pairs. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadInput's + // String and GoString methods. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption + // customer managed key to use for object encryption. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadInput's + // String and GoString methods. 
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // By default, Amazon S3 uses the STANDARD Storage Class to store newly created + // objects. The STANDARD storage class provides high durability and high availability. + // Depending on performance needs, you can specify a different Storage Class. + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 User Guide. + // + // * For directory buckets, only the S3 Express One Zone storage class is + // supported to store newly created objects. + // + // * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // + // This functionality is not supported for directory buckets. + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. 
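+//
+// Editor's note: an illustrative sketch, not generated SDK documentation;
+// the names are hypothetical. Bucket and Key are the required members, and a
+// checksum algorithm can be requested up front for the upload's parts:
+//
+//	in := &CreateMultipartUploadInput{}
+//	in.SetBucket("example-bucket").
+//		SetKey("large/object.bin").
+//		SetChecksumAlgorithm("SHA256")
+//	if err := in.Validate(); err != nil {
+//		// Bucket and Key are required members.
+//	}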
+func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *CreateMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CreateMultipartUploadInput) SetBucketKeyEnabled(v bool) *CreateMultipartUploadInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { + s.CacheControl = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *CreateMultipartUploadInput) SetChecksumAlgorithm(v string) *CreateMultipartUploadInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput { + s.ContentType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *CreateMultipartUploadInput) SetExpectedBucketOwner(v string) *CreateMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput { + s.Metadata = v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CreateMultipartUploadInput) SetObjectLockLegalHoldStatus(v string) *CreateMultipartUploadInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. 
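+//
+// Editor's note: an illustrative sketch, not generated SDK documentation;
+// the date is hypothetical. A retention mode is normally paired with a
+// retain-until date:
+//
+//	in := &CreateMultipartUploadInput{}
+//	in.SetObjectLockMode("GOVERNANCE").
+//		SetObjectLockRetainUntilDate(time.Date(2026, time.January, 1, 0, 0, 0, 0, time.UTC))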
+func (s *CreateMultipartUploadInput) SetObjectLockMode(v string) *CreateMultipartUploadInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *CreateMultipartUploadInput) SetObjectLockRetainUntilDate(v time.Time) *CreateMultipartUploadInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput { + s.SSECustomerKey = &v + return s +} + +func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { + s.WebsiteRedirectLocation = &v + return s +} + +func (s *CreateMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CreateMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
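+//
+// Editor's note: an illustrative sketch, not generated SDK documentation;
+// the ARN is hypothetical. Because of the value receiver, the caller's input
+// keeps its original Bucket while the returned copy carries the backfilled
+// name:
+//
+//	in := &CreateMultipartUploadInput{}
+//	in.SetBucket("arn:aws:s3:us-west-2:123456789012:accesspoint/example")
+//	modified, err := in.updateArnableField("example")
+//	_, _ = modified, err // in.Bucket still holds the ARN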
+func (s CreateMultipartUploadInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type CreateMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object + // name in the request, the response includes this header. The header indicates + // when the initiated multipart upload becomes eligible for an abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon S3 User Guide. + // + // The response also includes the x-amz-abort-rule-id header that provides the + // ID of the lifecycle configuration rule that defines the abort action. + // + // This functionality is not supported for directory buckets. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` + + // This header is returned along with the x-amz-abort-date header. It identifies + // the applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. + // + // This functionality is not supported for directory buckets. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // The name of the bucket to which the multipart upload was initiated. Does + // not return the access point ARN or access point alias if used. + // + // Access points are not supported by directory buckets. + Bucket *string `locationName:"Bucket" type:"string"` + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide the round-trip message integrity + // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. 
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, indicates the Amazon Web Services KMS Encryption Context to use + // for object encryption. The value of this header is a base64-encoded UTF-8 + // string holding JSON with the encryption context key-value pairs. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's + // String and GoString methods. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // ID for the initiated multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. +func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput { + s.Bucket = &v + return s +} + +func (s *CreateMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CreateMultipartUploadOutput) SetBucketKeyEnabled(v bool) *CreateMultipartUploadOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. 
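+
+// ----- editor's sketch: illustrative only, not part of the vendored SDK -----
+// Output fields in this file are pointers because every response header is
+// optional. Assuming the SDK's usual aws.StringValue helper (a nil pointer
+// yields ""), callers can read them without nil checks, as in this
+// hypothetical helper.
+func exampleReadUploadId(out *CreateMultipartUploadOutput) string {
+ // UploadId identifies the initiated multipart upload; it is needed by
+ // every subsequent UploadPart / CompleteMultipartUpload call.
+ return aws.StringValue(out.UploadId)
+}
+// ----- end editor's sketch -----
+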
+func (s *CreateMultipartUploadOutput) SetChecksumAlgorithm(v string) *CreateMultipartUploadOutput { + s.ChecksumAlgorithm = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { + s.Key = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadOutput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput { + s.UploadId = &v + return s +} + +type CreateSessionInput struct { + _ struct{} `locationName:"CreateSessionRequest" type:"structure"` + + // The name of the bucket that you create a session for. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies the mode of the session that will be created, either ReadWrite + // or ReadOnly. By default, a ReadWrite session is created. A ReadWrite session + // is capable of executing all the Zonal endpoint APIs on a directory bucket. + // A ReadOnly session is constrained to execute the following Zonal endpoint + // APIs: GetObject, HeadObject, ListObjectsV2, GetObjectAttributes, ListParts, + // and ListMultipartUploads. + SessionMode *string `location:"header" locationName:"x-amz-create-session-mode" type:"string" enum:"SessionMode"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
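+
+// ----- editor's sketch: illustrative only, not part of the vendored SDK -----
+// Per the CreateSessionInput comment above, sessions default to ReadWrite;
+// this sketch requests a ReadOnly session instead and validates the input
+// locally. The bucket name is hypothetical, and "ReadOnly" mirrors the
+// SessionMode enum value described in the field's documentation.
+func exampleReadOnlySessionInput() (*CreateSessionInput, error) {
+ in := (&CreateSessionInput{}).
+  SetBucket("example-bucket--usw2-az1--x-s3").
+  SetSessionMode("ReadOnly")
+ if err := in.Validate(); err != nil {
+  return nil, err
+ }
+ return in, nil
+}
+// ----- end editor's sketch -----
+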
+func (s *CreateSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSessionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CreateSessionInput) SetBucket(v string) *CreateSessionInput { + s.Bucket = &v + return s +} + +func (s *CreateSessionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetSessionMode sets the SessionMode field's value. +func (s *CreateSessionInput) SetSessionMode(v string) *CreateSessionInput { + s.SessionMode = &v + return s +} + +func (s *CreateSessionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CreateSessionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CreateSessionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type CreateSessionOutput struct { + _ struct{} `type:"structure"` + + // The established temporary security credentials for the created session. + // + // Credentials is a required field + Credentials *SessionCredentials `locationName:"Credentials" type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSessionOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *CreateSessionOutput) SetCredentials(v *SessionCredentials) *CreateSessionOutput { + s.Credentials = v + return s +} + +// The container element for specifying the default Object Lock retention settings +// for new objects placed in the specified bucket. +// +// - The DefaultRetention settings require both a mode and a period. +// +// - The DefaultRetention period can be either Days or Years but you must +// select one. You cannot specify Days and Years at the same time. +type DefaultRetention struct { + _ struct{} `type:"structure"` + + // The number of days that you want to specify for the default retention period. + // Must be used with Mode. + Days *int64 `type:"integer"` + + // The default Object Lock retention mode you want to apply to new objects placed + // in the specified bucket. Must be used with either Days or Years. 
+ Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The number of years that you want to specify for the default retention period. + // Must be used with Mode. + Years *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DefaultRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DefaultRetention) GoString() string { + return s.String() +} + +// SetDays sets the Days field's value. +func (s *DefaultRetention) SetDays(v int64) *DefaultRetention { + s.Days = &v + return s +} + +// SetMode sets the Mode field's value. +func (s *DefaultRetention) SetMode(v string) *DefaultRetention { + s.Mode = &v + return s +} + +// SetYears sets the Years field's value. +func (s *DefaultRetention) SetYears(v int64) *DefaultRetention { + s.Years = &v + return s +} + +// Container for the objects to delete. +type Delete struct { + _ struct{} `type:"structure"` + + // The object to delete. + // + // Directory buckets - For directory buckets, an object that's composed entirely + // of whitespace characters is not supported by the DeleteObjects API operation. + // The request will receive a 400 Bad Request error and none of the objects + // in the request will be deleted. + // + // Objects is a required field + Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` + + // Element to enable quiet mode for the request. When you add this element, + // you must set its value to true. + Quiet *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Delete) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Objects == nil { + invalidParams.Add(request.NewErrParamRequired("Objects")) + } + if s.Objects != nil { + for i, v := range s.Objects { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjects sets the Objects field's value. +func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete { + s.Objects = v + return s +} + +// SetQuiet sets the Quiet field's value. 
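+
+// ----- editor's sketch: illustrative only, not part of the vendored SDK -----
+// Quiet mode (see the field comment above) suppresses per-object success
+// entries in a multi-object delete response, so only errors come back.
+// Object keys here are hypothetical; Validate enforces the required Objects
+// member client-side, before any request would be sent.
+func exampleQuietDelete() (*Delete, error) {
+ del := (&Delete{
+  Objects: []*ObjectIdentifier{
+   {Key: aws.String("logs/2024/10/04/app.log")},
+  },
+ }).SetQuiet(true)
+ if err := del.Validate(); err != nil {
+  return nil, err
+ }
+ return del, nil
+}
+// ----- end editor's sketch -----
+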
+func (s *Delete) SetQuiet(v bool) *Delete { + s.Quiet = &v + return s +} + +type DeleteBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketAnalyticsConfigurationRequest" type:"structure"` + + // The name of the bucket from which an analytics configuration is deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. 
+// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketCorsInput struct { + _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"` + + // Specifies the bucket whose cors configuration is being deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
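+
+// ----- editor's sketch: illustrative only, not part of the vendored SDK -----
+// These generated inputs carry ARN helpers alongside plain bucket names;
+// hasEndpointARN (below) reports whether the Bucket member holds an ARN.
+// Both values in this sketch are hypothetical.
+func exampleBucketVersusArn() (plain, viaArn bool) {
+ byName := (&DeleteBucketCorsInput{}).SetBucket("example-bucket")
+ byArn := (&DeleteBucketCorsInput{}).
+  SetBucket("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap")
+ return !byName.hasEndpointARN(), byArn.hasEndpointARN()
+}
+// ----- end editor's sketch -----
+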
+func (s *DeleteBucketCorsInput) SetExpectedBucketOwner(v string) *DeleteBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketCorsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +type DeleteBucketEncryptionInput struct { + _ struct{} `locationName:"DeleteBucketEncryptionRequest" type:"structure"` + + // The name of the bucket containing the server-side encryption configuration + // to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketEncryptionInput) SetBucket(v string) *DeleteBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketEncryptionInput) SetExpectedBucketOwner(v string) *DeleteBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketEncryptionOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `locationName:"DeleteBucketRequest" type:"structure"` + + // Specifies the bucket being deleted. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). + // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. + // If you specify this header, the request fails with the HTTP status code 501 + // Not Implemented. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketInput) SetExpectedBucketOwner(v string) *DeleteBucketInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketIntelligentTieringConfigurationRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketIntelligentTieringConfigurationInput) SetBucket(v string) *DeleteBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *DeleteBucketIntelligentTieringConfigurationInput) SetId(v string) *DeleteBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DeleteBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` + + // The name of the bucket containing the inventory configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketLifecycleInput struct { + _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"` + + // The bucket name of the lifecycle to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
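+
+// ----- editor's sketch: illustrative only, not part of the vendored SDK -----
+// ExpectedBucketOwner (see the field comment above) acts as a guard: when
+// the supplied account ID does not own the bucket, the service rejects the
+// call with 403 Forbidden. Bucket name and account ID here are hypothetical.
+func exampleGuardedLifecycleDelete() *DeleteBucketLifecycleInput {
+ return (&DeleteBucketLifecycleInput{}).
+  SetBucket("example-bucket").
+  SetExpectedBucketOwner("111122223333")
+}
+// ----- end editor's sketch -----
+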
+func (s *DeleteBucketLifecycleInput) SetExpectedBucketOwner(v string) *DeleteBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketLifecycleOutput) GoString() string { + return s.String() +} + +type DeleteBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketMetricsConfigurationRequest" type:"structure"` + + // The name of the bucket containing the metrics configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
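+
+// ----- editor's sketch: illustrative only, not part of the vendored SDK -----
+// Validate (below) runs entirely client-side: both Bucket and Id are
+// required for this input, so omitting Id yields an ErrInvalidParams without
+// any network call.
+func exampleMetricsValidateSketch() {
+ in := (&DeleteBucketMetricsConfigurationInput{}).SetBucket("example-bucket")
+ if err := in.Validate(); err != nil {
+  fmt.Println(err) // reports the missing required Id parameter
+ }
+}
+// ----- end editor's sketch -----
+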
+func (s *DeleteBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +type DeleteBucketOwnershipControlsInput struct { + _ struct{} `locationName:"DeleteBucketOwnershipControlsRequest" type:"structure"` + + // The Amazon S3 bucket whose OwnershipControls you want to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOwnershipControlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOwnershipControlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketOwnershipControlsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketOwnershipControlsInput) SetBucket(v string) *DeleteBucketOwnershipControlsInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketOwnershipControlsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *DeleteBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketOwnershipControlsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketOwnershipControlsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOwnershipControlsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOwnershipControlsOutput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyInput struct { + _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"` + + // The bucket name. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). + // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. + // If you specify this header, the request fails with the HTTP status code 501 + // Not Implemented. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
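+
+// ----- editor's sketch: illustrative only, not part of the vendored SDK -----
+// For directory buckets, the struct comment above requires the
+// bucket_base_name--az_id--x-s3 naming form; this sketch just encodes that
+// shape with a hypothetical name (client-side Validate only checks presence
+// and minimum length, not the format itself).
+func exampleDirectoryBucketPolicyInput() *DeleteBucketPolicyInput {
+ return (&DeleteBucketPolicyInput{}).
+  SetBucket("doc-example-bucket--usw2-az1--x-s3")
+}
+// ----- end editor's sketch -----
+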
+func (s *DeleteBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketPolicyInput) SetExpectedBucketOwner(v string) *DeleteBucketPolicyInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketPolicyOutput) GoString() string { + return s.String() +} + +type DeleteBucketReplicationInput struct { + _ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketReplicationInput) SetExpectedBucketOwner(v string) *DeleteBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketReplicationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketReplicationOutput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingInput struct { + _ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"` + + // The bucket that has the tag set to be removed. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
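+ //
+ // A hedged usage sketch (bucket name and account ID are placeholders); the
+ // fluent setters defined below chain naturally:
+ //
+ //    input := (&DeleteBucketTaggingInput{}).
+ //        SetBucket("example-bucket").
+ //        SetExpectedBucketOwner("111122223333")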
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketTaggingInput) SetExpectedBucketOwner(v string) *DeleteBucketTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteInput struct { + _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"` + + // The bucket name for which you want to remove the website configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketWebsiteInput) SetExpectedBucketOwner(v string) *DeleteBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketWebsiteOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketWebsiteOutput) GoString() string {
+ return s.String()
+}
+
+// Information about the delete marker.
+type DeleteMarkerEntry struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether the object is (true) or is not (false) the latest version
+ // of an object.
+ IsLatest *bool `type:"boolean"`
+
+ // The object key.
+ Key *string `min:"1" type:"string"`
+
+ // Date and time when the object was last modified.
+ LastModified *time.Time `type:"timestamp"`
+
+ // The account that created the delete marker.
+ Owner *Owner `type:"structure"`
+
+ // Version ID of an object.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMarkerEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMarkerEntry) GoString() string {
+ return s.String()
+}
+
+// SetIsLatest sets the IsLatest field's value.
+func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry {
+ s.IsLatest = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry {
+ s.Key = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry {
+ s.LastModified = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry {
+ s.Owner = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
+ s.VersionId = &v
+ return s
+}
+
+// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+// in your replication configuration, you must also include a DeleteMarkerReplication
+// element. If your Filter includes a Tag element, the DeleteMarkerReplication
+// Status must be set to Disabled, because Amazon S3 does not support replicating
+// delete markers for tag-based rules. For an example configuration, see Basic
+// Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
+//
+// For more information about delete marker replication, see Basic Rule Configuration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html).
+//
+// If you are using an earlier version of the replication configuration, Amazon
+// S3 handles replication of delete markers differently. For more information,
+// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations).
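+//
+// An illustrative sketch consistent with the constraint above (a tag-based
+// Filter requires a Disabled status):
+//
+//    dmr := (&DeleteMarkerReplication{}).SetStatus(DeleteMarkerReplicationStatusDisabled)
+//    // dmr can then be assigned to a ReplicationRule's DeleteMarkerReplication field.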
+type DeleteMarkerReplication struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether to replicate delete markers.
+ Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMarkerReplication) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMarkerReplication) GoString() string {
+ return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication {
+ s.Status = &v
+ return s
+}
+
+type DeleteObjectInput struct {
+ _ struct{} `locationName:"DeleteObjectRequest" type:"structure"`
+
+ // The name of the bucket containing the object.
+ //
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the Amazon Web Services
+ // SDKs, you provide the access point ARN in place of the bucket name. For more
+ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates whether S3 Object Lock should bypass Governance-mode restrictions + // to process this operation. To use this header, you must have the s3:BypassGovernanceRetention + // permission. + // + // This functionality is not supported for directory buckets. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Key name of the object to delete. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // delete enabled. + // + // This functionality is not supported for directory buckets. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Version ID used to reference a specific version of the object. + // + // For directory buckets in this API operation, only the null value of the version + // ID is supported. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
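+//
+// For instance (values are placeholders), an input is minimally valid once
+// both required members are set:
+//
+//    in := (&DeleteObjectInput{}).SetBucket("example-bucket").SetKey("path/to/object")
+//    err := in.Validate() // nil: Bucket and Key are both present and non-empty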
+func (s *DeleteObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectInput) SetBypassGovernanceRetention(v bool) *DeleteObjectInput { + s.BypassGovernanceRetention = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteObjectInput) SetExpectedBucketOwner(v string) *DeleteObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { + s.Key = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { + s.VersionId = &v + return s +} + +func (s *DeleteObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteObjectOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the specified object version that was permanently deleted + // was (true) or was not (false) a delete marker before deletion. In a simple + // DELETE, this header indicates whether (true) or not (false) the current version + // of the object is a delete marker. + // + // This functionality is not supported for directory buckets. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. 
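+ //
+ // An illustrative sketch of inspecting the output (svc is assumed to be an
+ // initialized *S3 client; input construction is omitted here):
+ //
+ //    out, err := svc.DeleteObject(in)
+ //    if err == nil && aws.BoolValue(out.DeleteMarker) {
+ //        // the operation interacted with a delete marker rather than object data
+ //    }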
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectOutput) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput { + s.RequestCharged = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { + s.VersionId = &v + return s +} + +type DeleteObjectTaggingInput struct { + _ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"` + + // The bucket name containing the objects from which to remove the tags. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key that identifies the object in the bucket from which to remove all + // tags. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The versionId of the object that the tag-set will be removed from. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteObjectTaggingInput) SetExpectedBucketOwner(v string) *DeleteObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput { + s.VersionId = &v + return s +} + +func (s *DeleteObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteObjectTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // The versionId of the object the tag-set was removed from. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput { + s.VersionId = &v + return s +} + +type DeleteObjectsInput struct { + _ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"` + + // The bucket name containing the objects to delete. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? 
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies whether you want to delete this object even if it has a Governance-type
+ // Object Lock in place. To use this header, you must have the s3:BypassGovernanceRetention
+ // permission.
+ //
+ // This functionality is not supported for directory buckets.
+ BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon
+ // S3 fails the request with the HTTP status code 400 Bad Request.
+ //
+ // For the x-amz-checksum-algorithm header, replace algorithm with the supported
+ // algorithm from the following list:
+ //
+ // * CRC32
+ //
+ // * CRC32C
+ //
+ // * SHA1
+ //
+ // * SHA256
+ //
+ // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If the individual checksum value you provide through x-amz-checksum-algorithm
+ // doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm,
+ // Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum
+ // algorithm that matches the provided value in x-amz-checksum-algorithm.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If
+ // a value is specified for this parameter, the matching algorithm's checksum
+ // member must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // Container for the request.
+ //
+ // Delete is a required field
+ Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device. Required to
+ // permanently delete a versioned object if versioning is configured with MFA
+ // delete enabled.
+ //
+ // When performing the DeleteObjects operation on an MFA delete enabled bucket,
+ // which attempts to delete the specified versioned objects, you must include
+ // an MFA token.
If you don't provide an MFA token, the entire request will + // fail, even if there are non-versioned objects that you are trying to delete. + // If you provide an invalid token, whether there are versioned object keys + // in the request or not, the entire Multi-Object Delete request will fail. + // For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Delete == nil { + invalidParams.Add(request.NewErrParamRequired("Delete")) + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectsInput) SetBypassGovernanceRetention(v bool) *DeleteObjectsInput { + s.BypassGovernanceRetention = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *DeleteObjectsInput) SetChecksumAlgorithm(v string) *DeleteObjectsInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetDelete sets the Delete field's value. 
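+//
+// An illustrative sketch (keys are placeholders) of building the required
+// Delete container for a multi-object delete:
+//
+//    del := &Delete{Objects: []*ObjectIdentifier{
+//        {Key: aws.String("photos/a.jpg")},
+//        {Key: aws.String("photos/b.jpg")},
+//    }}
+//    input := (&DeleteObjectsInput{}).SetBucket("example-bucket").SetDelete(del)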
+func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { + s.Delete = v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteObjectsInput) SetExpectedBucketOwner(v string) *DeleteObjectsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { + s.RequestPayer = &v + return s +} + +func (s *DeleteObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteObjectsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteObjectsOutput struct { + _ struct{} `type:"structure"` + + // Container element for a successful delete. It identifies the object that + // was successfully deleted. + Deleted []*DeletedObject `type:"list" flattened:"true"` + + // Container for a failed delete action that describes the object that Amazon + // S3 attempted to delete and the error it encountered. + Errors []*Error `locationName:"Error" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectsOutput) GoString() string { + return s.String() +} + +// SetDeleted sets the Deleted field's value. +func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput { + s.Deleted = v + return s +} + +// SetErrors sets the Errors field's value. +func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput { + s.Errors = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { + s.RequestCharged = &v + return s +} + +type DeletePublicAccessBlockInput struct { + _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"` + + // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeletePublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeletePublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeletePublicAccessBlockInput) SetBucket(v string) *DeletePublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *DeletePublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeletePublicAccessBlockInput) SetExpectedBucketOwner(v string) *DeletePublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeletePublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeletePublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeletePublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeletePublicAccessBlockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeletePublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeletePublicAccessBlockOutput) GoString() string {
+ return s.String()
+}
+
+// Information about the deleted object.
+type DeletedObject struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the specified object version that was permanently deleted
+ // was (true) or was not (false) a delete marker before deletion. In a simple
+ // DELETE, this header indicates whether (true) or not (false) the current version
+ // of the object is a delete marker.
+ //
+ // This functionality is not supported for directory buckets.
+ DeleteMarker *bool `type:"boolean"`
+
+ // The version ID of the delete marker created as a result of the DELETE operation.
+ // If you delete a specific object version, the value returned by this header
+ // is the version ID of the object version deleted.
+ //
+ // This functionality is not supported for directory buckets.
+ DeleteMarkerVersionId *string `type:"string"`
+
+ // The name of the deleted object.
+ Key *string `min:"1" type:"string"`
+
+ // The version ID of the deleted object.
+ //
+ // This functionality is not supported for directory buckets.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeletedObject) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeletedObject) GoString() string {
+ return s.String()
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value.
+func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject {
+ s.DeleteMarkerVersionId = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeletedObject) SetKey(v string) *DeletedObject {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeletedObject) SetVersionId(v string) *DeletedObject {
+ s.VersionId = &v
+ return s
+}
+
+// Specifies information about where to publish analysis or configuration results
+// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC).
+type Destination struct {
+ _ struct{} `type:"structure"`
+
+ // Specify this only in a cross-account scenario (where the source and destination
+ // bucket owners are not the same) when you want to change replica ownership
+ // to the Amazon Web Services account that owns the destination bucket. If this
+ // is not specified in the replication configuration, the replicas are owned
+ // by the same Amazon Web Services account that owns the source object.
+ AccessControlTranslation *AccessControlTranslation `type:"structure"`
+
+ // Destination bucket owner account ID.
In a cross-account scenario, if you + // direct Amazon S3 to change replica ownership to the Amazon Web Services account + // that owns the destination bucket by specifying the AccessControlTranslation + // property, this is the account ID of the destination bucket owner. For more + // information, see Replication Additional Configuration: Changing the Replica + // Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) + // in the Amazon S3 User Guide. + Account *string `type:"string"` + + // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to + // store the results. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // A container that provides information about encryption. If SourceSelectionCriteria + // is specified, you must specify this element. + EncryptionConfiguration *EncryptionConfiguration `type:"structure"` + + // A container specifying replication metrics-related settings enabling replication + // metrics and events. + Metrics *Metrics `type:"structure"` + + // A container specifying S3 Replication Time Control (S3 RTC), including whether + // S3 RTC is enabled and the time when all objects and operations on objects + // must be replicated. Must be specified together with a Metrics block. + ReplicationTime *ReplicationTime `type:"structure"` + + // The storage class to use when replicating objects, such as S3 Standard or + // reduced redundancy. By default, Amazon S3 uses the storage class of the source + // object to create the object replica. + // + // For valid values, see the StorageClass element of the PUT Bucket replication + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // action in the Amazon S3 API Reference. + StorageClass *string `type:"string" enum:"StorageClass"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Destination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Destination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Destination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.AccessControlTranslation != nil { + if err := s.AccessControlTranslation.Validate(); err != nil { + invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams)) + } + } + if s.Metrics != nil { + if err := s.Metrics.Validate(); err != nil { + invalidParams.AddNested("Metrics", err.(request.ErrInvalidParams)) + } + } + if s.ReplicationTime != nil { + if err := s.ReplicationTime.Validate(); err != nil { + invalidParams.AddNested("ReplicationTime", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessControlTranslation sets the AccessControlTranslation field's value. 
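+//
+// As an illustrative sketch (the bucket ARN is a placeholder), the fluent
+// setters defined below can be chained when assembling a replication
+// destination:
+//
+//    d := (&Destination{}).
+//        SetBucket("arn:aws:s3:::example-destination-bucket").
+//        SetStorageClass(StorageClassStandard)
+//    err := d.Validate() // nil: the required Bucket member is set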
+func (s *Destination) SetAccessControlTranslation(v *AccessControlTranslation) *Destination { + s.AccessControlTranslation = v + return s +} + +// SetAccount sets the Account field's value. +func (s *Destination) SetAccount(v string) *Destination { + s.Account = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *Destination) SetBucket(v string) *Destination { + s.Bucket = &v + return s +} + +func (s *Destination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *Destination { + s.EncryptionConfiguration = v + return s +} + +// SetMetrics sets the Metrics field's value. +func (s *Destination) SetMetrics(v *Metrics) *Destination { + s.Metrics = v + return s +} + +// SetReplicationTime sets the ReplicationTime field's value. +func (s *Destination) SetReplicationTime(v *ReplicationTime) *Destination { + s.ReplicationTime = v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Destination) SetStorageClass(v string) *Destination { + s.StorageClass = &v + return s +} + +// Contains the type of server-side encryption used. +type Encryption struct { + _ struct{} `type:"structure"` + + // The server-side encryption algorithm used when storing job results in Amazon + // S3 (for example, AES256, aws:kms). + // + // EncryptionType is a required field + EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"` + + // If the encryption type is aws:kms, this optional value can be used to specify + // the encryption context for the restore results. + KMSContext *string `type:"string"` + + // If the encryption type is aws:kms, this optional value specifies the ID of + // the symmetric encryption customer managed key to use for encryption of job + // results. Amazon S3 only supports symmetric encryption KMS keys. For more + // information, see Asymmetric keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the Amazon Web Services Key Management Service Developer Guide. + // + // KMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by Encryption's + // String and GoString methods. + KMSKeyId *string `type:"string" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Encryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Encryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Encryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Encryption"} + if s.EncryptionType == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncryptionType sets the EncryptionType field's value. 
+func (s *Encryption) SetEncryptionType(v string) *Encryption {
+	s.EncryptionType = &v
+	return s
+}
+
+// SetKMSContext sets the KMSContext field's value.
+func (s *Encryption) SetKMSContext(v string) *Encryption {
+	s.KMSContext = &v
+	return s
+}
+
+// SetKMSKeyId sets the KMSKeyId field's value.
+func (s *Encryption) SetKMSKeyId(v string) *Encryption {
+	s.KMSKeyId = &v
+	return s
+}
+
+// Specifies encryption-related information for an Amazon S3 bucket that is
+// a destination for replicated objects.
+type EncryptionConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web
+	// Services KMS key stored in Amazon Web Services Key Management Service (KMS)
+	// for the destination bucket. Amazon S3 uses this key to encrypt replica objects.
+	// Amazon S3 only supports symmetric encryption KMS keys. For more information,
+	// see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+	// in the Amazon Web Services Key Management Service Developer Guide.
+	ReplicaKmsKeyID *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EncryptionConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EncryptionConfiguration) GoString() string {
+	return s.String()
+}
+
+// SetReplicaKmsKeyID sets the ReplicaKmsKeyID field's value.
+func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfiguration {
+	s.ReplicaKmsKeyID = &v
+	return s
+}
+
+// A message that indicates the request is complete and no more messages will
+// be sent. You should not assume that the request is complete until the client
+// receives an EndEvent.
+type EndEvent struct {
+	_ struct{} `locationName:"EndEvent" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EndEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EndEvent) GoString() string {
+	return s.String()
+}
+
+// The EndEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *EndEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the EndEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *EndEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *EndEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + return msg, err +} + +// Container for all error elements. +type Error struct { + _ struct{} `type:"structure"` + + // The error code is a string that uniquely identifies an error condition. It + // is meant to be read and understood by programs that detect and handle errors + // by type. The following is a list of Amazon S3 error codes. For more information, + // see Error responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html). + // + // * Code: AccessDenied Description: Access Denied HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AccountProblem Description: There is a problem with your Amazon + // Web Services account that prevents the action from completing successfully. + // Contact Amazon Web Services Support for further assistance. HTTP Status + // Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AllAccessDisabled Description: All access to this Amazon S3 resource + // has been disabled. Contact Amazon Web Services Support for further assistance. + // HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AmbiguousGrantByEmailAddress Description: The email address you + // provided is associated with more than one account. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: AuthorizationHeaderMalformed Description: The authorization header + // you provided is invalid. HTTP Status Code: 400 Bad Request HTTP Status + // Code: N/A + // + // * Code: BadDigest Description: The Content-MD5 you specified did not match + // what we received. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: BucketAlreadyExists Description: The requested bucket name is + // not available. The bucket namespace is shared by all users of the system. + // Please select a different name and try again. HTTP Status Code: 409 Conflict + // SOAP Fault Code Prefix: Client + // + // * Code: BucketAlreadyOwnedByYou Description: The bucket you tried to create + // already exists, and you own it. Amazon S3 returns this error in all Amazon + // Web Services Regions except in the North Virginia Region. For legacy compatibility, + // if you re-create an existing bucket that you already own in the North + // Virginia Region, Amazon S3 returns 200 OK and resets the bucket access + // control lists (ACLs). Code: 409 Conflict (in all Regions except the North + // Virginia Region) SOAP Fault Code Prefix: Client + // + // * Code: BucketNotEmpty Description: The bucket you tried to delete is + // not empty. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: CredentialsNotSupported Description: This request does not support + // credentials. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: CrossLocationLoggingProhibited Description: Cross-location logging + // not allowed. Buckets in one geographic location cannot log information + // to a bucket in another location. HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooSmall Description: Your proposed upload is smaller than + // the minimum allowed object size. 
HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooLarge Description: Your proposed upload exceeds the maximum + // allowed object size. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: ExpiredToken Description: The provided token has expired. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IllegalVersioningConfigurationException Description: Indicates + // that the versioning configuration specified in the request is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncompleteBody Description: You did not provide the number of + // bytes specified by the Content-Length HTTP header HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncorrectNumberOfFilesInPostRequest Description: POST requires + // exactly one file upload per request. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: InlineDataTooLarge Description: Inline data exceeds the maximum + // allowed size. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InternalError Description: We encountered an internal error. Please + // try again. HTTP Status Code: 500 Internal Server Error SOAP Fault Code + // Prefix: Server + // + // * Code: InvalidAccessKeyId Description: The Amazon Web Services access + // key ID you provided does not exist in our records. HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: InvalidAddressingHeader Description: You must specify the Anonymous + // role. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: InvalidArgument Description: Invalid Argument HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketName Description: The specified bucket is not valid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketState Description: The request is not valid with + // the current state of the bucket. HTTP Status Code: 409 Conflict SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidDigest Description: The Content-MD5 you specified is not + // valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidEncryptionAlgorithmError Description: The encryption request + // you specified is not valid. The valid value is AES256. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidLocationConstraint Description: The specified location + // constraint is not valid. For more information about Regions, see How to + // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidObjectState Description: The action is not valid for the + // current state of the object. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidPart Description: One or more of the specified parts could + // not be found. The part might not have been uploaded, or the specified + // entity tag might not have matched the part's entity tag. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPartOrder Description: The list of parts was not in ascending + // order. 
Parts list must be specified in order by part number. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPayer Description: All access to this object has been disabled. + // Please contact Amazon Web Services Support for further assistance. HTTP + // Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: InvalidPolicyDocument Description: The content of the form does + // not meet the conditions specified in the policy document. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidRange Description: The requested range cannot be satisfied. + // HTTP Status Code: 416 Requested Range Not Satisfiable SOAP Fault Code + // Prefix: Client + // + // * Code: InvalidRequest Description: Please use AWS4-HMAC-SHA256. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: SOAP requests must be made over an + // HTTPS connection. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with non-DNS compliant names. HTTP Status Code: + // 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with periods (.) in their names. HTTP Status + // Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate endpoint + // only supports virtual style requests. HTTP Status Code: 400 Bad Request + // Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is not + // configured on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is disabled + // on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported on this bucket. Contact Amazon Web Services Support for + // more information. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration cannot + // be enabled on this bucket. Contact Amazon Web Services Support for more + // information. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidSecurity Description: The provided security credentials + // are not valid. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidSOAPRequest Description: The SOAP request body is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidStorageClass Description: The storage class you specified + // is not valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidTargetBucketForLogging Description: The target bucket for + // logging does not exist, is not owned by you, or does not have the appropriate + // grants for the log-delivery group. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidToken Description: The provided token is malformed or otherwise + // invalid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidURI Description: Couldn't parse the specified URI. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: KeyTooLongError Description: Your key is too long. 
HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedACLError Description: The XML you provided was not well-formed + // or did not validate against our published schema. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedPOSTRequest Description: The body of your POST request + // is not well-formed multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: MalformedXML Description: This happens when the user sends malformed + // XML (XML that doesn't conform to the published XSD) for the configuration. + // The error message is, "The XML you provided was not well-formed or did + // not validate against our published schema." HTTP Status Code: 400 Bad + // Request SOAP Fault Code Prefix: Client + // + // * Code: MaxMessageLengthExceeded Description: Your request was too big. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MaxPostPreDataLengthExceededError Description: Your POST request + // fields preceding the upload file were too large. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MetadataTooLarge Description: Your metadata headers exceed the + // maximum allowed metadata size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: MethodNotAllowed Description: The specified method is not allowed + // against this resource. HTTP Status Code: 405 Method Not Allowed SOAP Fault + // Code Prefix: Client + // + // * Code: MissingAttachment Description: A SOAP attachment was expected, + // but none were found. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: MissingContentLength Description: You must provide the Content-Length + // HTTP header. HTTP Status Code: 411 Length Required SOAP Fault Code Prefix: + // Client + // + // * Code: MissingRequestBodyError Description: This happens when the user + // sends an empty XML document as a request. The error message is, "Request + // body is empty." HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: MissingSecurityElement Description: The SOAP 1.1 request is missing + // a security element. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: MissingSecurityHeader Description: Your request is missing a required + // header. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: NoLoggingStatusForKey Description: There is no such thing as a + // logging status subresource for a key. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucket Description: The specified bucket does not exist. + // HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucketPolicy Description: The specified bucket does not + // have a bucket policy. HTTP Status Code: 404 Not Found SOAP Fault Code + // Prefix: Client + // + // * Code: NoSuchKey Description: The specified key does not exist. HTTP + // Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchLifecycleConfiguration Description: The lifecycle configuration + // does not exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: + // Client + // + // * Code: NoSuchUpload Description: The specified multipart upload does + // not exist. The upload ID might be invalid, or the multipart upload might + // have been aborted or completed. 
HTTP Status Code: 404 Not Found SOAP Fault + // Code Prefix: Client + // + // * Code: NoSuchVersion Description: Indicates that the version ID specified + // in the request does not match an existing version. HTTP Status Code: 404 + // Not Found SOAP Fault Code Prefix: Client + // + // * Code: NotImplemented Description: A header you provided implies functionality + // that is not implemented. HTTP Status Code: 501 Not Implemented SOAP Fault + // Code Prefix: Server + // + // * Code: NotSignedUp Description: Your account is not signed up for the + // Amazon S3 service. You must sign up before you can use Amazon S3. You + // can sign up at the following URL: Amazon S3 (http://aws.amazon.com/s3) + // HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: OperationAborted Description: A conflicting conditional action + // is currently in progress against this resource. Try again. HTTP Status + // Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: PermanentRedirect Description: The bucket you are attempting to + // access must be addressed using the specified endpoint. Send all future + // requests to this endpoint. HTTP Status Code: 301 Moved Permanently SOAP + // Fault Code Prefix: Client + // + // * Code: PreconditionFailed Description: At least one of the preconditions + // you specified did not hold. HTTP Status Code: 412 Precondition Failed + // SOAP Fault Code Prefix: Client + // + // * Code: Redirect Description: Temporary redirect. HTTP Status Code: 307 + // Moved Temporarily SOAP Fault Code Prefix: Client + // + // * Code: RestoreAlreadyInProgress Description: Object restore is already + // in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: RequestIsNotMultiPartContent Description: Bucket POST must be + // of the enclosure-type multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeout Description: Your socket connection to the server + // was not read from or written to within the timeout period. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeTooSkewed Description: The difference between the request + // time and the server's time is too large. HTTP Status Code: 403 Forbidden + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTorrentOfBucketError Description: Requesting the torrent + // file of a bucket is not permitted. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: SignatureDoesNotMatch Description: The request signature we calculated + // does not match the signature you provided. Check your Amazon Web Services + // secret access key and signing method. For more information, see REST Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) + // for details. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: ServiceUnavailable Description: Service is unable to handle request. + // HTTP Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server + // + // * Code: SlowDown Description: Reduce your request rate. HTTP Status Code: + // 503 Slow Down SOAP Fault Code Prefix: Server + // + // * Code: TemporaryRedirect Description: You are being redirected to the + // bucket while DNS updates. 
HTTP Status Code: 307 Moved Temporarily SOAP + // Fault Code Prefix: Client + // + // * Code: TokenRefreshRequired Description: The provided token must be refreshed. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: TooManyBuckets Description: You have attempted to create more + // buckets than allowed. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: UnexpectedContent Description: This request does not support content. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UnresolvableGrantByEmailAddress Description: The email address + // you provided does not match any account on record. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UserKeyMustBeSpecified Description: The bucket POST must contain + // the specified field name. If it is specified, check the order of the fields. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + Code *string `type:"string"` + + // The error key. + Key *string `min:"1" type:"string"` + + // The error message contains a generic description of the error condition in + // English. It is intended for a human audience. Simple programs display the + // message directly to the end user if they encounter an error condition they + // don't know how or don't care to handle. Sophisticated programs with more + // exhaustive error handling and proper internationalization are more likely + // to ignore the error message. + Message *string `type:"string"` + + // The version ID of the error. + // + // This functionality is not supported for directory buckets. + VersionId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Error) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Error) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *Error) SetCode(v string) *Error { + s.Code = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Error) SetKey(v string) *Error { + s.Key = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *Error) SetMessage(v string) *Error { + s.Message = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *Error) SetVersionId(v string) *Error { + s.VersionId = &v + return s +} + +// The error information. +type ErrorDocument struct { + _ struct{} `type:"structure"` + + // The object key name to use when a 4XX class error occurs. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ErrorDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ErrorDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ErrorDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ErrorDocument) SetKey(v string) *ErrorDocument { + s.Key = &v + return s +} + +// A container for specifying the configuration for Amazon EventBridge. +type EventBridgeConfiguration struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EventBridgeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EventBridgeConfiguration) GoString() string { + return s.String() +} + +// Optional configuration to replicate existing source bucket objects. For more +// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) +// in the Amazon S3 User Guide. +type ExistingObjectReplication struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 replicates existing source bucket objects. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExistingObjectReplicationStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExistingObjectReplication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExistingObjectReplication) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ExistingObjectReplication) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExistingObjectReplication"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ExistingObjectReplication) SetStatus(v string) *ExistingObjectReplication { + s.Status = &v + return s +} + +// Specifies the Amazon S3 object key name to filter on. An object key name +// is the name assigned to an object in your Amazon S3 bucket. You specify whether +// to filter on the suffix or prefix of the object key name. A prefix is a specific +// string of characters at the beginning of an object key name, which you can +// use to organize objects. For example, you can start the key names of related +// objects with a prefix, such as 2023- or engineering/. Then, you can use FilterRule +// to find objects in a bucket with key names that have the same prefix. A suffix +// is similar to a prefix, but it is at the end of the object key name instead +// of at the beginning. +type FilterRule struct { + _ struct{} `type:"structure"` + + // The object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. The maximum length is 1,024 characters. Overlapping + // prefixes and suffixes are not supported. For more information, see Configuring + // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon S3 User Guide. + Name *string `type:"string" enum:"FilterRuleName"` + + // The value that the filter searches for in object key names. + Value *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FilterRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FilterRule) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *FilterRule) SetName(v string) *FilterRule { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *FilterRule) SetValue(v string) *FilterRule { + s.Value = &v + return s +} + +type GetBucketAccelerateConfigurationInput struct { + _ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"` + + // The name of the bucket for which the accelerate configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketAccelerateConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketAccelerateConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetBucketAccelerateConfigurationInput) SetRequestPayer(v string) *GetBucketAccelerateConfigurationInput { + s.RequestPayer = &v + return s +} + +func (s *GetBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s GetBucketAccelerateConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetBucketAccelerateConfigurationOutput) SetRequestCharged(v string) *GetBucketAccelerateConfigurationOutput { + s.RequestCharged = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput { + s.Status = &v + return s +} + +type GetBucketAclInput struct { + _ struct{} `locationName:"GetBucketAclRequest" type:"structure"` + + // Specifies the S3 bucket whose ACL is being requested. + // + // When you use this API operation with an access point, provide the alias of + // the access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide + // the alias of the Object Lambda access point in place of the bucket name. + // If the Object Lambda access point alias in a request is not valid, the error + // code InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketAclInput) SetExpectedBucketOwner(v string) *GetBucketAclInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. + Owner *Owner `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { + s.Owner = v + return s +} + +type GetBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"GetBucketAnalyticsConfigurationRequest" type:"structure"` + + // The name of the bucket from which an analytics configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. 
+ AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. +func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput { + s.AnalyticsConfiguration = v + return s +} + +type GetBucketCorsInput struct { + _ struct{} `locationName:"GetBucketCorsRequest" type:"structure"` + + // The bucket name for which to get the cors configuration. + // + // When you use this API operation with an access point, provide the alias of + // the access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide + // the alias of the Object Lambda access point in place of the bucket name. + // If the Object Lambda access point alias in a request is not valid, the error + // code InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *GetBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketCorsInput) SetExpectedBucketOwner(v string) *GetBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketCorsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketCorsOutput struct { + _ struct{} `type:"structure"` + + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketCorsOutput) GoString() string { + return s.String() +} + +// SetCORSRules sets the CORSRules field's value. +func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { + s.CORSRules = v + return s +} + +type GetBucketEncryptionInput struct { + _ struct{} `locationName:"GetBucketEncryptionRequest" type:"structure"` + + // The name of the bucket from which the server-side encryption configuration + // is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketEncryptionInput) SetBucket(v string) *GetBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *GetBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketEncryptionInput) SetExpectedBucketOwner(v string) *GetBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketEncryptionOutput struct { + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Specifies the default server-side-encryption configuration. + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketEncryptionOutput) GoString() string { + return s.String() +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *GetBucketEncryptionOutput { + s.ServerSideEncryptionConfiguration = v + return s +} + +type GetBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"GetBucketIntelligentTieringConfigurationRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketIntelligentTieringConfigurationInput) SetBucket(v string) *GetBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *GetBucketIntelligentTieringConfigurationInput) SetId(v string) *GetBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure" payload:"IntelligentTieringConfiguration"` + + // Container for S3 Intelligent-Tiering configuration. + IntelligentTieringConfiguration *IntelligentTieringConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value. +func (s *GetBucketIntelligentTieringConfigurationOutput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *GetBucketIntelligentTieringConfigurationOutput { + s.IntelligentTieringConfiguration = v + return s +} + +type GetBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"` + + // The name of the bucket containing the inventory configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
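+//
+// The setters follow the SDK's fluent pattern and may be chained; a minimal
+// sketch (the bucket name, configuration ID, and account ID below are
+// placeholders, not values from this patch):
+//
+//	input := (&GetBucketInventoryConfigurationInput{}).
+//		SetBucket("example-bucket").
+//		SetId("example-inventory-id").
+//		SetExpectedBucketOwner("111122223333")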
+func (s *GetBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure" payload:"InventoryConfiguration"` + + // Specifies the inventory configuration. + InventoryConfiguration *InventoryConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput { + s.InventoryConfiguration = v + return s +} + +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"` + + // The name of the bucket for which to get the lifecycle information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketLifecycleConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Container for a lifecycle rule. + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput { + s.Rules = v + return s +} + +type GetBucketLifecycleInput struct { + _ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"` + + // The name of the bucket for which to get the lifecycle information. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLifecycleInput) SetExpectedBucketOwner(v string) *GetBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLifecycleOutput struct { + _ struct{} `type:"structure"` + + // Container for a lifecycle rule. + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput { + s.Rules = v + return s +} + +type GetBucketLocationInput struct { + _ struct{} `locationName:"GetBucketLocationRequest" type:"structure"` + + // The name of the bucket for which to get the location. + // + // When you use this API operation with an access point, provide the alias of + // the access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide + // the alias of the Object Lambda access point in place of the bucket name. + // If the Object Lambda access point alias in a request is not valid, the error + // code InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLocationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLocationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
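+//
+// A hedged end-to-end sketch of fetching a bucket's Region with this input
+// type (the session, client, and bucket name are assumptions, not part of
+// this patch):
+//
+//	sess := session.Must(session.NewSession())
+//	svc := s3.New(sess)
+//	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil {
+//		// NormalizeBucketLocation maps the empty us-east-1 response to a
+//		// usable Region string.
+//		_ = s3.NormalizeBucketLocation(aws.StringValue(out.LocationConstraint))
+//	}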
+func (s *GetBucketLocationInput) SetExpectedBucketOwner(v string) *GetBucketLocationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLocationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLocationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLocationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLocationOutput struct { + _ struct{} `type:"structure"` + + // Specifies the Region where the bucket resides. For a list of all the Amazon + // S3 supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region). + // Buckets in Region us-east-1 have a LocationConstraint of null. + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLocationOutput) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. +func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput { + s.LocationConstraint = &v + return s +} + +type GetBucketLoggingInput struct { + _ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"` + + // The bucket name for which to get the logging information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
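+//
+// Because the type satisfies fmt.Stringer, printing a value yields the
+// prettified dump described above; a small sketch (bucket name is a
+// placeholder):
+//
+//	in := (&GetBucketLoggingInput{}).SetBucket("example-bucket")
+//	fmt.Println(in) // String() renders the struct via awsutil.Prettify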
+func (s GetBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLoggingInput) SetExpectedBucketOwner(v string) *GetBucketLoggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLoggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLoggingOutput struct { + _ struct{} `type:"structure"` + + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon S3 API Reference. + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLoggingOutput) GoString() string { + return s.String() +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput { + s.LoggingEnabled = v + return s +} + +type GetBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"GetBucketMetricsConfigurationRequest" type:"structure"` + + // The name of the bucket containing the metrics configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
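+//
+// An illustrative sketch of the copy semantics (updateArnableField is an
+// unexported helper; the access point ARN below is a placeholder):
+//
+//	in := GetBucketMetricsConfigurationInput{
+//		Bucket: aws.String("arn:aws:s3:us-west-2:111122223333:accesspoint/example-ap"),
+//	}
+//	out, _ := in.updateArnableField("example-ap")
+//	// out is a *GetBucketMetricsConfigurationInput whose Bucket is
+//	// "example-ap"; in.Bucket still holds the original ARN.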
+func (s GetBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"MetricsConfiguration"` + + // Specifies the metrics configuration. + MetricsConfiguration *MetricsConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput { + s.MetricsConfiguration = v + return s +} + +type GetBucketNotificationConfigurationRequest struct { + _ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"` + + // The name of the bucket for which to get the notification configuration. + // + // When you use this API operation with an access point, provide the alias of + // the access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide + // the alias of the Object Lambda access point in place of the bucket name. + // If the Object Lambda access point alias in a request is not valid, the error + // code InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketNotificationConfigurationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketNotificationConfigurationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
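+//
+// Validation also runs automatically when a request is sent; invoking it
+// directly looks like this (a sketch; the zero-value input is chosen to
+// trigger the required-field error):
+//
+//	in := &GetBucketNotificationConfigurationRequest{}
+//	if err := in.Validate(); err != nil {
+//		// err is a request.ErrInvalidParams naming the missing Bucket member.
+//	}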
+func (s *GetBucketNotificationConfigurationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest { + s.Bucket = &v + return s +} + +func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketNotificationConfigurationRequest) SetExpectedBucketOwner(v string) *GetBucketNotificationConfigurationRequest { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketNotificationConfigurationRequest) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketNotificationConfigurationRequest) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketNotificationConfigurationRequest) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketOwnershipControlsInput struct { + _ struct{} `locationName:"GetBucketOwnershipControlsRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose OwnershipControls you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketOwnershipControlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketOwnershipControlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketOwnershipControlsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketOwnershipControlsInput) SetBucket(v string) *GetBucketOwnershipControlsInput { + s.Bucket = &v + return s +} + +func (s *GetBucketOwnershipControlsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *GetBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketOwnershipControlsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketOwnershipControlsOutput struct { + _ struct{} `type:"structure" payload:"OwnershipControls"` + + // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) + // currently in effect for this Amazon S3 bucket. + OwnershipControls *OwnershipControls `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketOwnershipControlsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketOwnershipControlsOutput) GoString() string { + return s.String() +} + +// SetOwnershipControls sets the OwnershipControls field's value. +func (s *GetBucketOwnershipControlsOutput) SetOwnershipControls(v *OwnershipControls) *GetBucketOwnershipControlsOutput { + s.OwnershipControls = v + return s +} + +type GetBucketPolicyInput struct { + _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` + + // The bucket name to get the bucket policy for. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. 
Bucket names must also follow
+	// the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3).
+	// For information about bucket naming restrictions, see Directory bucket naming
+	// rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Access points - When you use this API operation with an access point, provide
+	// the alias of the access point in place of the bucket name.
+	//
+	// Object Lambda access points - When you use this API operation with an Object
+	// Lambda access point, provide the alias of the Object Lambda access point
+	// in place of the bucket name. If the Object Lambda access point alias in a
+	// request is not valid, the error code InvalidAccessPointAliasError is returned.
+	// For more information about InvalidAccessPointAliasError, see List of Error
+	// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList).
+	//
+	// Access points and Object Lambda access points are not supported by directory
+	// buckets.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// The account ID of the expected bucket owner. If the account ID that you provide
+	// does not match the actual owner of the bucket, the request fails with the
+	// HTTP status code 403 Forbidden (access denied).
+	//
+	// For directory buckets, this header is not supported in this API operation.
+	// If you specify this header, the request fails with the HTTP status code 501
+	// Not Implemented.
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketPolicyInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketPolicyInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *GetBucketPolicyInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
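+//
+// A hedged sketch of pinning the request to an expected owning account
+// (the account ID is a placeholder); per the field documentation, a
+// mismatch fails with HTTP 403 Forbidden:
+//
+//	input := (&GetBucketPolicyInput{}).
+//		SetBucket("example-bucket").
+//		SetExpectedBucketOwner("111122223333")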
+func (s *GetBucketPolicyInput) SetExpectedBucketOwner(v string) *GetBucketPolicyInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketPolicyOutput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The bucket policy as a JSON document. + Policy *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { + s.Policy = &v + return s +} + +type GetBucketPolicyStatusInput struct { + _ struct{} `locationName:"GetBucketPolicyStatusRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose policy status you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketPolicyStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyStatusInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketPolicyStatusInput) SetBucket(v string) *GetBucketPolicyStatusInput { + s.Bucket = &v + return s +} + +func (s *GetBucketPolicyStatusInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketPolicyStatusInput) SetExpectedBucketOwner(v string) *GetBucketPolicyStatusInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketPolicyStatusInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyStatusInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketPolicyStatusInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketPolicyStatusOutput struct { + _ struct{} `type:"structure" payload:"PolicyStatus"` + + // The policy status for the specified bucket. + PolicyStatus *PolicyStatus `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyStatusOutput) GoString() string { + return s.String() +} + +// SetPolicyStatus sets the PolicyStatus field's value. +func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucketPolicyStatusOutput { + s.PolicyStatus = v + return s +} + +type GetBucketReplicationInput struct { + _ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"` + + // The bucket name for which to get the replication information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketReplicationInput) SetExpectedBucketOwner(v string) *GetBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketReplicationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketReplicationOutput) GoString() string { + return s.String() +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. 
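+//
+// A sketch of reading the rules off a GetBucketReplication response (the
+// service client and bucket name are assumptions):
+//
+//	out, err := svc.GetBucketReplication(&s3.GetBucketReplicationInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil && out.ReplicationConfiguration != nil {
+//		for _, rule := range out.ReplicationConfiguration.Rules {
+//			_ = rule // inspect each replication rule
+//		}
+//	}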
+func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput {
+	s.ReplicationConfiguration = v
+	return s
+}
+
+type GetBucketRequestPaymentInput struct {
+	_ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"`
+
+	// The name of the bucket for which to get the request payment configuration.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// The account ID of the expected bucket owner. If the account ID that you provide
+	// does not match the actual owner of the bucket, the request fails with the
+	// HTTP status code 403 Forbidden (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketRequestPaymentInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetBucketRequestPaymentInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketRequestPaymentInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *GetBucketRequestPaymentInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *GetBucketRequestPaymentInput) SetExpectedBucketOwner(v string) *GetBucketRequestPaymentInput {
+	s.ExpectedBucketOwner = &v
+	return s
+}
+
+func (s *GetBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	return parseEndpointARN(*s.Bucket)
+}
+
+func (s *GetBucketRequestPaymentInput) hasEndpointARN() bool {
+	if s.Bucket == nil {
+		return false
+	}
+	return arn.IsARN(*s.Bucket)
+}
+
+// updateArnableField updates the value of the input field that
+// takes an ARN as an input. This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s GetBucketRequestPaymentInput) updateArnableField(v string) (interface{}, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	s.Bucket = aws.String(v)
+	return &s, nil
+}
+
+type GetBucketRequestPaymentOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies who pays for the download and request fees.
+ Payer *string `type:"string" enum:"Payer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +// SetPayer sets the Payer field's value. +func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput { + s.Payer = &v + return s +} + +type GetBucketTaggingInput struct { + _ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"` + + // The name of the bucket for which to get the tagging information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *GetBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
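
For the GetBucketTagging types above, a usage sketch under the same editorial
assumptions (note that a bucket with no tags returns the NoSuchTagSet error
rather than an empty set):

    out, err := svc.GetBucketTagging(&s3.GetBucketTaggingInput{
    	Bucket: aws.String("example-bucket"),
    })
    if err != nil {
    	log.Fatal(err) // e.g. NoSuchTagSet
    }
    for _, tag := range out.TagSet {
    	fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
    }
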
+func (s *GetBucketTaggingInput) SetExpectedBucketOwner(v string) *GetBucketTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + // Contains the tag set. + // + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { + s.TagSet = v + return s +} + +type GetBucketVersioningInput struct { + _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"` + + // The name of the bucket for which to get the versioning information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
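
GetBucketVersioningInput above also illustrates the SDK's fluent setters
(SetBucket and friends, generated below), which can stand in for a struct
literal; a sketch with placeholder bucket and owner values:

    in := new(s3.GetBucketVersioningInput).
    	SetBucket("example-bucket").
    	SetExpectedBucketOwner("111122223333") // placeholder account ID
    out, err := svc.GetBucketVersioning(in)
    if err != nil {
    	log.Fatal(err)
    }
    // Status is empty if versioning was never enabled on the bucket.
    fmt.Println("status:", aws.StringValue(out.Status),
    	"mfa delete:", aws.StringValue(out.MFADelete))
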
+func (s *GetBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput { + s.Bucket = &v + return s +} + +func (s *GetBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketVersioningInput) SetExpectedBucketOwner(v string) *GetBucketVersioningInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketVersioningInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketVersioningInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketVersioningInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketVersioningOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketVersioningOutput) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. +func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput { + s.Status = &v + return s +} + +type GetBucketWebsiteInput struct { + _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"` + + // The bucket name for which to get the website configuration. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *GetBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketWebsiteInput) SetExpectedBucketOwner(v string) *GetBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketWebsiteOutput struct { + _ struct{} `type:"structure"` + + // The object key name of the website error document to use for 4XX class errors. + ErrorDocument *ErrorDocument `type:"structure"` + + // The name of the index document for the website (for example index.html). + IndexDocument *IndexDocument `type:"structure"` + + // Specifies the redirect behavior of all requests to a website endpoint of + // an Amazon S3 bucket. + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + // Rules that define when a redirect is applied and the redirect behavior. 
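
A sketch for the GetBucketWebsite types above, under the same editorial
assumptions (the nil checks matter because every output field is optional):

    out, err := svc.GetBucketWebsite(&s3.GetBucketWebsiteInput{
    	Bucket: aws.String("example-bucket"),
    })
    if err != nil {
    	log.Fatal(err) // e.g. NoSuchWebsiteConfiguration
    }
    if out.IndexDocument != nil {
    	fmt.Println("index suffix:", aws.StringValue(out.IndexDocument.Suffix))
    }
    if out.ErrorDocument != nil {
    	fmt.Println("error document:", aws.StringValue(out.ErrorDocument.Key))
    }
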
+ RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketWebsiteOutput) GoString() string { + return s.String() +} + +// SetErrorDocument sets the ErrorDocument field's value. +func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. +func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput { + s.RoutingRules = v + return s +} + +type GetObjectAclInput struct { + _ struct{} `locationName:"GetObjectAclRequest" type:"structure"` + + // The bucket name that contains the object for which to get the ACL information. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key of the object for which to get the ACL information. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. 
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Version ID used to reference a specific version of the object. + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput { + s.Bucket = &v + return s +} + +func (s *GetObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectAclInput) SetExpectedBucketOwner(v string) *GetObjectAclInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { + s.VersionId = &v + return s +} + +func (s *GetObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
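
GetObjectAcl adds a required Key alongside Bucket, which is why its Validate
method above checks both members. A usage sketch with placeholder names:

    out, err := svc.GetObjectAcl(&s3.GetObjectAclInput{
    	Bucket: aws.String("example-bucket"),
    	Key:    aws.String("example-key"),
    })
    if err != nil {
    	log.Fatal(err)
    }
    fmt.Println("owner:", aws.StringValue(out.Owner.DisplayName))
    for _, g := range out.Grants {
    	fmt.Println("grant:", aws.StringValue(g.Permission))
    }
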
+func (s GetObjectAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput { + s.Owner = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { + s.RequestCharged = &v + return s +} + +type GetObjectAttributesInput struct { + _ struct{} `locationName:"GetObjectAttributesRequest" type:"structure"` + + // The name of the bucket that contains the object. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. 
+ // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The object key. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Sets the maximum number of parts to return. + MaxParts *int64 `location:"header" locationName:"x-amz-max-parts" type:"integer"` + + // Specifies the fields at the root level that you want returned in the response. + // Fields that you do not specify are not returned. + // + // ObjectAttributes is a required field + ObjectAttributes []*string `location:"header" locationName:"x-amz-object-attributes" type:"list" required:"true" enum:"ObjectAttributes"` + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *int64 `location:"header" locationName:"x-amz-part-number-marker" type:"integer"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + // + // This functionality is not supported for directory buckets. 
+ // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetObjectAttributesInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // The version ID used to reference a specific version of the object. + // + // S3 Versioning isn't enabled and supported for directory buckets. For this + // API operation, only the null value of the version ID is supported by directory + // buckets. You can only specify null to the versionId query parameter in the + // request. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAttributesInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.ObjectAttributes == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectAttributes")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectAttributesInput) SetBucket(v string) *GetObjectAttributesInput { + s.Bucket = &v + return s +} + +func (s *GetObjectAttributesInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectAttributesInput) SetExpectedBucketOwner(v string) *GetObjectAttributesInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectAttributesInput) SetKey(v string) *GetObjectAttributesInput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *GetObjectAttributesInput) SetMaxParts(v int64) *GetObjectAttributesInput { + s.MaxParts = &v + return s +} + +// SetObjectAttributes sets the ObjectAttributes field's value. 
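
For GetObjectAttributes, the required ObjectAttributes list selects which
root-level fields the response carries; valid names per the enum are ETag,
Checksum, ObjectParts, StorageClass, and ObjectSize. A sketch under the same
placeholder assumptions:

    out, err := svc.GetObjectAttributes(&s3.GetObjectAttributesInput{
    	Bucket:           aws.String("example-bucket"),
    	Key:              aws.String("example-key"),
    	ObjectAttributes: aws.StringSlice([]string{"ETag", "ObjectSize"}),
    })
    if err != nil {
    	log.Fatal(err)
    }
    fmt.Println("etag:", aws.StringValue(out.ETag),
    	"size:", aws.Int64Value(out.ObjectSize))
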
+func (s *GetObjectAttributesInput) SetObjectAttributes(v []*string) *GetObjectAttributesInput { + s.ObjectAttributes = v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *GetObjectAttributesInput) SetPartNumberMarker(v int64) *GetObjectAttributesInput { + s.PartNumberMarker = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectAttributesInput) SetRequestPayer(v string) *GetObjectAttributesInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectAttributesInput) SetSSECustomerAlgorithm(v string) *GetObjectAttributesInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *GetObjectAttributesInput) SetSSECustomerKey(v string) *GetObjectAttributesInput { + s.SSECustomerKey = &v + return s +} + +func (s *GetObjectAttributesInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectAttributesInput) SetSSECustomerKeyMD5(v string) *GetObjectAttributesInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAttributesInput) SetVersionId(v string) *GetObjectAttributesInput { + s.VersionId = &v + return s +} + +func (s *GetObjectAttributesInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectAttributesInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectAttributesInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectAttributesOutput struct { + _ struct{} `type:"structure"` + + // The checksum or digest of the object. + Checksum *Checksum `type:"structure"` + + // Specifies whether the object retrieved was (true) or was not (false) a delete + // marker. If false, this response header does not appear in the response. + // + // This functionality is not supported for directory buckets. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL. + ETag *string `type:"string"` + + // The creation date of the object. + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` + + // A collection of parts associated with a multipart upload. + ObjectParts *GetObjectAttributesParts `type:"structure"` + + // The size of the object in bytes. + ObjectSize *int64 `type:"long"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides the storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. + StorageClass *string `type:"string" enum:"StorageClass"` + + // The version ID of the object. + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesOutput) GoString() string { + return s.String() +} + +// SetChecksum sets the Checksum field's value. +func (s *GetObjectAttributesOutput) SetChecksum(v *Checksum) *GetObjectAttributesOutput { + s.Checksum = v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *GetObjectAttributesOutput) SetDeleteMarker(v bool) *GetObjectAttributesOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *GetObjectAttributesOutput) SetETag(v string) *GetObjectAttributesOutput { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *GetObjectAttributesOutput) SetLastModified(v time.Time) *GetObjectAttributesOutput { + s.LastModified = &v + return s +} + +// SetObjectParts sets the ObjectParts field's value. +func (s *GetObjectAttributesOutput) SetObjectParts(v *GetObjectAttributesParts) *GetObjectAttributesOutput { + s.ObjectParts = v + return s +} + +// SetObjectSize sets the ObjectSize field's value. +func (s *GetObjectAttributesOutput) SetObjectSize(v int64) *GetObjectAttributesOutput { + s.ObjectSize = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectAttributesOutput) SetRequestCharged(v string) *GetObjectAttributesOutput { + s.RequestCharged = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *GetObjectAttributesOutput) SetStorageClass(v string) *GetObjectAttributesOutput { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAttributesOutput) SetVersionId(v string) *GetObjectAttributesOutput { + s.VersionId = &v + return s +} + +// A collection of parts associated with a multipart upload. +type GetObjectAttributesParts struct { + _ struct{} `type:"structure"` + + // Indicates whether the returned list of parts is truncated. A value of true + // indicates that the list was truncated. A list can be truncated if the number + // of parts exceeds the limit returned in the MaxParts element. 
+ IsTruncated *bool `type:"boolean"`
+
+ // The maximum number of parts allowed in the response.
+ MaxParts *int64 `type:"integer"`
+
+ // When a list is truncated, this element specifies the last part in the list,
+ // as well as the value to use for the PartNumberMarker request parameter in
+ // a subsequent request.
+ NextPartNumberMarker *int64 `type:"integer"`
+
+ // The marker for the current part.
+ PartNumberMarker *int64 `type:"integer"`
+
+ // A container for elements related to a particular part. A response can contain
+ // zero or more Parts elements.
+ //
+ // * General purpose buckets - For GetObjectAttributes, if an additional checksum
+ // (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1,
+ // or x-amz-checksum-sha256) isn't applied to the object specified in the
+ // request, the response doesn't return Part.
+ //
+ // * Directory buckets - For GetObjectAttributes, no matter whether an additional
+ // checksum is applied to the object specified in the request, the response
+ // returns Part.
+ Parts []*ObjectPart `locationName:"Part" type:"list" flattened:"true"`
+
+ // The total number of parts.
+ TotalPartsCount *int64 `locationName:"PartsCount" type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectAttributesParts) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectAttributesParts) GoString() string {
+ return s.String()
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *GetObjectAttributesParts) SetIsTruncated(v bool) *GetObjectAttributesParts {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetMaxParts sets the MaxParts field's value.
+func (s *GetObjectAttributesParts) SetMaxParts(v int64) *GetObjectAttributesParts {
+ s.MaxParts = &v
+ return s
+}
+
+// SetNextPartNumberMarker sets the NextPartNumberMarker field's value.
+func (s *GetObjectAttributesParts) SetNextPartNumberMarker(v int64) *GetObjectAttributesParts {
+ s.NextPartNumberMarker = &v
+ return s
+}
+
+// SetPartNumberMarker sets the PartNumberMarker field's value.
+func (s *GetObjectAttributesParts) SetPartNumberMarker(v int64) *GetObjectAttributesParts {
+ s.PartNumberMarker = &v
+ return s
+}
+
+// SetParts sets the Parts field's value.
+func (s *GetObjectAttributesParts) SetParts(v []*ObjectPart) *GetObjectAttributesParts {
+ s.Parts = v
+ return s
+}
+
+// SetTotalPartsCount sets the TotalPartsCount field's value.
+func (s *GetObjectAttributesParts) SetTotalPartsCount(v int64) *GetObjectAttributesParts {
+ s.TotalPartsCount = &v
+ return s
+}
+
+type GetObjectInput struct {
+ _ struct{} `locationName:"GetObjectRequest" type:"structure"`
+
+ // The bucket name containing the object.
+ //
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the Amazon Web Services
+ // SDKs, you provide the access point ARN in place of the bucket name. For more
+ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Object Lambda access points - When you use this action with an Object Lambda
+ // access point, you must direct requests to the Object Lambda access point
+ // hostname. The Object Lambda access point hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // To retrieve the checksum, this mode must be enabled.
+ //
+ // The AWS SDK for Go v1 does not support automatic response payload checksum
+ // validation. This feature is available in the AWS SDK for Go v2.
+ ChecksumMode *string `location:"header" locationName:"x-amz-checksum-mode" type:"string" enum:"ChecksumMode"`
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Return the object only if its entity tag (ETag) is the same as the one specified
+ // in this header; otherwise, return a 412 Precondition Failed error.
+ //
+ // If both of the If-Match and If-Unmodified-Since headers are present in the
+ // request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since
+ // condition evaluates to false; then, S3 returns 200 OK and the data requested.
+ //
+ // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+ // Return the object only if it has been modified since the specified time;
+ // otherwise, return a 304 Not Modified error.
+ //
+ // If both of the If-None-Match and If-Modified-Since headers are present in
+ // the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since
+ // condition evaluates to true; then, S3 returns 304 Not Modified status code.
+ //
+ // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified in this header; otherwise, return a 304 Not Modified error.
+ //
+ // If both of the If-None-Match and If-Modified-Since headers are present in
+ // the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since
+ // condition evaluates to true; then, S3 returns 304 Not Modified HTTP status
+ // code.
+ //
+ // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time;
+ // otherwise, return a 412 Precondition Failed error.
+ //
+ // If both of the If-Match and If-Unmodified-Since headers are present in the
+ // request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since
+ // condition evaluates to false; then, S3 returns 200 OK and the data requested.
+ //
+ // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
+
+ // Key of the object to get.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the object being read. This is a positive integer between
+ // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified.
+ // Useful for downloading just a part of an object.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+ // Downloads the specified byte range of an object. For more information about
+ // the HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range
+ // (https://www.rfc-editor.org/rfc/rfc9110.html#name-range).
+ //
+ // Amazon S3 doesn't support retrieving multiple ranges of data per GET request.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Sets the Cache-Control header of the response. 
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` + + // Sets the Content-Disposition header of the response. + ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"` + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"` + + // Sets the Content-Type header of the response. + ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` + + // Sets the Expires header of the response. + ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"` + + // Specifies the algorithm to use when decrypting the object (for example, AES256). + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when + // you GET the object, you must use the following headers: + // + // * x-amz-server-side-encryption-customer-algorithm + // + // * x-amz-server-side-encryption-customer-key + // + // * x-amz-server-side-encryption-customer-key-MD5 + // + // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided + // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key that you originally provided + // for Amazon S3 to encrypt the data before storing it. This value is used to + // decrypt the object when recovering it and must match the one used when storing + // the data. The key must be appropriate for use with the algorithm specified + // in the x-amz-server-side-encryption-customer-algorithm header. + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when + // you GET the object, you must use the following headers: + // + // * x-amz-server-side-encryption-customer-algorithm + // + // * x-amz-server-side-encryption-customer-key + // + // * x-amz-server-side-encryption-customer-key-MD5 + // + // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided + // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetObjectInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the customer-provided encryption key + // according to RFC 1321. 
Amazon S3 uses this header for a message integrity + // check to ensure that the encryption key was transmitted without error. + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when + // you GET the object, you must use the following headers: + // + // * x-amz-server-side-encryption-customer-algorithm + // + // * x-amz-server-side-encryption-customer-key + // + // * x-amz-server-side-encryption-customer-key-MD5 + // + // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided + // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Version ID used to reference a specific version of the object. + // + // By default, the GetObject operation returns the current version of an object. + // To return a different version, use the versionId subresource. + // + // * If you include a versionId in your request header, you must have the + // s3:GetObjectVersion permission to access a specific version of an object. + // The s3:GetObject permission is not required in this scenario. + // + // * If you request the current version of an object without a specific versionId + // in the request header, only the s3:GetObject permission is required. The + // s3:GetObjectVersion permission is not required in this scenario. + // + // * Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. For this API operation, only the null value of the version ID + // is supported by directory buckets. You can only specify null to the versionId + // query parameter in the request. + // + // For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html). + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { + s.Bucket = &v + return s +} + +func (s *GetObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumMode sets the ChecksumMode field's value. +func (s *GetObjectInput) SetChecksumMode(v string) *GetObjectInput { + s.ChecksumMode = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectInput) SetExpectedBucketOwner(v string) *GetObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectInput) SetKey(v string) *GetObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *GetObjectInput) SetRange(v string) *GetObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput { + s.RequestPayer = &v + return s +} + +// SetResponseCacheControl sets the ResponseCacheControl field's value. +func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput { + s.ResponseCacheControl = &v + return s +} + +// SetResponseContentDisposition sets the ResponseContentDisposition field's value. +func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput { + s.ResponseContentDisposition = &v + return s +} + +// SetResponseContentEncoding sets the ResponseContentEncoding field's value. +func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput { + s.ResponseContentEncoding = &v + return s +} + +// SetResponseContentLanguage sets the ResponseContentLanguage field's value. +func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { + s.ResponseContentLanguage = &v + return s +} + +// SetResponseContentType sets the ResponseContentType field's value. +func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { + s.ResponseContentType = &v + return s +} + +// SetResponseExpires sets the ResponseExpires field's value. +func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { + s.ResponseExpires = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. 
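
GetObject itself streams the payload through the output's Body field (an
io.ReadCloser), so callers must close it; Range performs a partial read. A
sketch under the same placeholder assumptions, additionally importing io:

    out, err := svc.GetObject(&s3.GetObjectInput{
    	Bucket: aws.String("example-bucket"),
    	Key:    aws.String("example-key"),
    	Range:  aws.String("bytes=0-1023"), // optional ranged GET
    })
    if err != nil {
    	log.Fatal(err)
    }
    defer out.Body.Close()
    n, err := io.Copy(io.Discard, out.Body) // drain the stream
    if err != nil {
    	log.Fatal(err)
    }
    fmt.Println("read", n, "bytes of", aws.StringValue(out.ContentType))
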
+func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *GetObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { + s.VersionId = &v + return s +} + +func (s *GetObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectLegalHoldInput struct { + _ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"` + + // The bucket name containing the object whose legal hold status you want to + // retrieve. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key name for the object whose legal hold status you want to retrieve. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. 
+ // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID of the object whose legal hold status you want to retrieve. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLegalHoldInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLegalHoldInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLegalHoldInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectLegalHoldInput) SetBucket(v string) *GetObjectLegalHoldInput { + s.Bucket = &v + return s +} + +func (s *GetObjectLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectLegalHoldInput) SetExpectedBucketOwner(v string) *GetObjectLegalHoldInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectLegalHoldInput) SetKey(v string) *GetObjectLegalHoldInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectLegalHoldInput) SetRequestPayer(v string) *GetObjectLegalHoldInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInput { + s.VersionId = &v + return s +} + +func (s *GetObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectLegalHoldInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
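
Putting GetObjectLegalHoldInput to use, with svc as in the earlier sketch and placeholder names; the GetObjectLegalHold operation itself is generated elsewhere in this file:

hold, err := svc.GetObjectLegalHold((&s3.GetObjectLegalHoldInput{}).
	SetBucket("example-bucket").
	SetKey("example-key").
	SetVersionId("example-version-id")) // optional; omit to read the current version
if err == nil && hold.LegalHold != nil {
	fmt.Println(aws.StringValue(hold.LegalHold.Status)) // "ON" or "OFF"
}
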
+func (s GetObjectLegalHoldInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectLegalHoldOutput struct { + _ struct{} `type:"structure" payload:"LegalHold"` + + // The current legal hold status for the specified object. + LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLegalHoldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLegalHoldOutput) GoString() string { + return s.String() +} + +// SetLegalHold sets the LegalHold field's value. +func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObjectLegalHoldOutput { + s.LegalHold = v + return s +} + +type GetObjectLockConfigurationInput struct { + _ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"` + + // The bucket whose Object Lock configuration you want to retrieve. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLockConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLockConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
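
A matching sketch for the Object Lock configuration input above (svc and names as before; the GetObjectLockConfiguration operation is generated elsewhere in this file):

cfg, err := svc.GetObjectLockConfiguration((&s3.GetObjectLockConfigurationInput{}).
	SetBucket("example-bucket"))
if err == nil && cfg.ObjectLockConfiguration != nil {
	fmt.Println(aws.StringValue(cfg.ObjectLockConfiguration.ObjectLockEnabled)) // "Enabled" when set
}
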
+func (s *GetObjectLockConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLockConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectLockConfigurationInput) SetBucket(v string) *GetObjectLockConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetObjectLockConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectLockConfigurationInput) SetExpectedBucketOwner(v string) *GetObjectLockConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectLockConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectLockConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectLockConfigurationOutput struct { + _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` + + // The specified bucket's Object Lock configuration. + ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLockConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLockConfigurationOutput) GoString() string { + return s.String() +} + +// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. +func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *GetObjectLockConfigurationOutput { + s.ObjectLockConfiguration = v + return s +} + +type GetObjectOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + // Indicates that a range of bytes was specified in the request. + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Object data. + Body io.ReadCloser `type:"blob"` + + // Indicates whether the object uses an S3 Bucket Key for server-side encryption + // with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. 
+ BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Indicates what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The portion of the object returned in the response. + ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Indicates whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + // + // * If the current version of the object is a delete marker, Amazon S3 behaves + // as if the object was deleted and includes x-amz-delete-marker: true in + // the response. + // + // * If the specified version in the request is a delete marker, the response + // returns a 405 Method Not Allowed error and the Last-Modified: timestamp + // response header. 
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // An entity tag (ETag) is an opaque identifier assigned by a web server to
+ // a specific version of a resource found at a URL.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured (see PutBucketLifecycleConfiguration
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)),
+ // the response includes this header. It includes the expiry-date and rule-id
+ // key-value pairs providing object expiration information. The value of the
+ // rule-id is URL-encoded.
+ //
+ // This functionality is not supported for directory buckets.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+ // Date and time when the object was last modified.
+ //
+ // General purpose buckets - When you specify a versionId of the object in your
+ // request, if the specified version in the request is a delete marker, the
+ // response returns a 405 Method Not Allowed error and the Last-Modified: timestamp
+ // response header.
+ LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
+
+ // A map of metadata to store with the object in S3.
+ //
+ // By default, unmarshaled keys are written as map keys in the following canonicalized
+ // format: the first letter and any letter following a hyphen are capitalized,
+ // and the rest are lowercase. Set `aws.Config.LowerCaseHeaderMaps` to `true`
+ // to write unmarshaled keys to the map as lowercase.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // This is set to the number of metadata entries not returned in the headers
+ // that are prefixed with x-amz-meta-. This can happen if you create metadata
+ // using an API like SOAP that supports more flexible metadata than the REST
+ // API. For example, using SOAP, you can create metadata whose values are not
+ // legal HTTP headers.
+ //
+ // This functionality is not supported for directory buckets.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+ // Indicates whether this object has an active legal hold. This field is only
+ // returned if you have permission to view an object's legal hold status.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // The Object Lock mode that's currently in place for this object.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // The date and time when this object's Object Lock will expire.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The count of parts this object has. This value is only returned if you specify
+ // partNumber in your request and the object was uploaded as a multipart upload.
+ PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + // Amazon S3 can return this if your request involves a bucket that is either + // a source or destination in a replication rule. + // + // This functionality is not supported for directory buckets. + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration action and expiration time + // of the restored object copy. + // + // This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide the round-trip message integrity + // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetObjectOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The number of tags, if any, on the object, when you have the relevant permission + // to read object tags. + // + // You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) + // to retrieve the tag set associated with an object. + // + // This functionality is not supported for directory buckets. 
+ TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"` + + // Version ID of the object. + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetBody sets the Body field's value. +func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput { + s.Body = v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *GetObjectOutput) SetBucketKeyEnabled(v bool) *GetObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput { + s.CacheControl = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *GetObjectOutput) SetChecksumCRC32(v string) *GetObjectOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *GetObjectOutput) SetChecksumCRC32C(v string) *GetObjectOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *GetObjectOutput) SetChecksumSHA1(v string) *GetObjectOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *GetObjectOutput) SetChecksumSHA256(v string) *GetObjectOutput { + s.ChecksumSHA256 = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentRange sets the ContentRange field's value. 
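
A consumption sketch for GetObjectOutput, with svc as before and "io" from the standard library: drain and close Body, and note how the metadata canonicalization described above surfaces an x-amz-meta-my-key header:

out, err := svc.GetObject((&s3.GetObjectInput{}).
	SetBucket("example-bucket").
	SetKey("example-key"))
if err == nil {
	defer out.Body.Close()
	n, _ := io.Copy(io.Discard, out.Body) // always drain and close Body
	fmt.Println(n, "of", aws.Int64Value(out.ContentLength), "bytes")
	// Header x-amz-meta-my-key surfaces under the canonicalized key "My-Key".
	fmt.Println(aws.StringValue(out.Metadata["My-Key"]))
}
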
+func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput { + s.ContentRange = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { + s.MissingMeta = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *GetObjectOutput) SetObjectLockLegalHoldStatus(v string) *GetObjectOutput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *GetObjectOutput) SetObjectLockMode(v string) *GetObjectOutput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *GetObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *GetObjectOutput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. 
+func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput { + s.StorageClass = &v + return s +} + +// SetTagCount sets the TagCount field's value. +func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput { + s.TagCount = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +type GetObjectRetentionInput struct { + _ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"` + + // The bucket name containing the object whose retention settings you want to + // retrieve. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key name for the object whose retention settings you want to retrieve. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID for the object whose retention settings you want to retrieve. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectRetentionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectRetentionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectRetentionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectRetentionInput) SetBucket(v string) *GetObjectRetentionInput { + s.Bucket = &v + return s +} + +func (s *GetObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectRetentionInput) SetExpectedBucketOwner(v string) *GetObjectRetentionInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectRetentionInput) SetKey(v string) *GetObjectRetentionInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectRetentionInput) SetRequestPayer(v string) *GetObjectRetentionInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInput { + s.VersionId = &v + return s +} + +func (s *GetObjectRetentionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectRetentionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectRetentionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectRetentionOutput struct { + _ struct{} `type:"structure" payload:"Retention"` + + // The container element for an object's retention settings. + Retention *ObjectLockRetention `locationName:"Retention" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectRetentionOutput) GoString() string { + return s.String() +} + +// SetRetention sets the Retention field's value. +func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObjectRetentionOutput { + s.Retention = v + return s +} + +type GetObjectTaggingInput struct { + _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"` + + // The bucket name containing the object for which to get the tagging information. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which to get the tagging information. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The versionId of the object for which to get the tagging information. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *GetObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectTaggingInput) SetExpectedBucketOwner(v string) *GetObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTaggingInput) SetRequestPayer(v string) *GetObjectTaggingInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { + s.VersionId = &v + return s +} + +func (s *GetObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // Contains the tag set. + // + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + + // The versionId of the object for which you got the tagging information. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput { + s.TagSet = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput { + s.VersionId = &v + return s +} + +type GetObjectTorrentInput struct { + _ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"` + + // The name of the bucket containing the object for which to get the torrent + // files. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The object key for which to get the information. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTorrentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTorrentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
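
A sketch of reading the tag set returned by the tagging API above (svc and names as before):

tags, err := svc.GetObjectTagging((&s3.GetObjectTaggingInput{}).
	SetBucket("example-bucket").
	SetKey("example-key"))
if err == nil {
	for _, t := range tags.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
	}
	fmt.Println("version:", aws.StringValue(tags.VersionId))
}
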
+func (s *GetObjectTorrentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { + s.Bucket = &v + return s +} + +func (s *GetObjectTorrentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectTorrentInput) SetExpectedBucketOwner(v string) *GetObjectTorrentInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput { + s.RequestPayer = &v + return s +} + +func (s *GetObjectTorrentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTorrentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectTorrentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectTorrentOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + // A Bencoded dictionary as defined by the BitTorrent specification + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTorrentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTorrentOutput) GoString() string { + return s.String() +} + +// SetBody sets the Body field's value. 
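
A sketch that writes the bencoded torrent body above to disk, with svc as before and "io"/"os" from the standard library:

tor, err := svc.GetObjectTorrent((&s3.GetObjectTorrentInput{}).
	SetBucket("example-bucket").
	SetKey("example-key"))
if err == nil {
	defer tor.Body.Close()
	f, ferr := os.Create("example-key.torrent")
	if ferr == nil {
		defer f.Close()
		io.Copy(f, tor.Body) // bencoded dictionary, ready for a BitTorrent client
	}
}
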
+func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput { + s.Body = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput { + s.RequestCharged = &v + return s +} + +type GetPublicAccessBlockInput struct { + _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetPublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetPublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetPublicAccessBlockInput) SetBucket(v string) *GetPublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *GetPublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetPublicAccessBlockInput) SetExpectedBucketOwner(v string) *GetPublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
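
A sketch of inspecting a bucket's PublicAccessBlock configuration (svc and names as before):

pab, err := svc.GetPublicAccessBlock((&s3.GetPublicAccessBlockInput{}).
	SetBucket("example-bucket"))
if err == nil && pab.PublicAccessBlockConfiguration != nil {
	c := pab.PublicAccessBlockConfiguration
	fmt.Println(aws.BoolValue(c.BlockPublicAcls), aws.BoolValue(c.BlockPublicPolicy),
		aws.BoolValue(c.IgnorePublicAcls), aws.BoolValue(c.RestrictPublicBuckets))
}
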
+func (s GetPublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetPublicAccessBlockOutput struct { + _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The PublicAccessBlock configuration currently in effect for this Amazon S3 + // bucket. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetPublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetPublicAccessBlockOutput) GoString() string { + return s.String() +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetPublicAccessBlockOutput { + s.PublicAccessBlockConfiguration = v + return s +} + +// Container for S3 Glacier job parameters. +type GlacierJobParameters struct { + _ struct{} `type:"structure"` + + // Retrieval tier at which the restore will be processed. + // + // Tier is a required field + Tier *string `type:"string" required:"true" enum:"Tier"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlacierJobParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlacierJobParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GlacierJobParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"} + if s.Tier == nil { + invalidParams.Add(request.NewErrParamRequired("Tier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTier sets the Tier field's value. +func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { + s.Tier = &v + return s +} + +// Container for grant information. +type Grant struct { + _ struct{} `type:"structure"` + + // The person being granted permissions. + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
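
GlacierJobParameters only appears nested inside a restore request. A sketch, assuming the RestoreObject operation and RestoreRequest type generated elsewhere in this file (names are placeholders):

_, err := svc.RestoreObject(&s3.RestoreObjectInput{
	Bucket: aws.String("example-bucket"),
	Key:    aws.String("archived-key"),
	RestoreRequest: &s3.RestoreRequest{
		Days: aws.Int64(2),
		GlacierJobParameters: &s3.GlacierJobParameters{
			Tier: aws.String(s3.TierStandard), // or s3.TierExpedited / s3.TierBulk
		},
	},
})
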
+func (s Grant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Grant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *Grant) SetGrantee(v *Grantee) *Grant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *Grant) SetPermission(v string) *Grant { + s.Permission = &v + return s +} + +// Container for the person being granted permissions. +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // + // * US East (N. Virginia) + // + // * US West (N. California) + // + // * US West (Oregon) + // + // * Asia Pacific (Singapore) + // + // * Asia Pacific (Sydney) + // + // * Asia Pacific (Tokyo) + // + // * Europe (Ireland) + // + // * South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see Regions + // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + // + // Type is a required field + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. + URI *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Grantee) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Grantee) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grantee) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grantee"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Grantee) SetDisplayName(v string) *Grantee { + s.DisplayName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. 
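
Grant and Grantee above model one entry of an ACL grant list. A small sketch of building and validating one grant; the canonical user ID is an example value:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Grantee.Type is the only required field; it is serialized as the
	// xsi:type XML attribute, which is what the xmlPrefix/xmlURI struct
	// tags above are for.
	grantee := (&s3.Grantee{}).
		SetType(s3.TypeCanonicalUser).
		SetID("79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be") // example ID

	grant := (&s3.Grant{}).
		SetGrantee(grantee).
		SetPermission(s3.PermissionRead)

	// Validate recurses into the nested Grantee, as the methods above show.
	if err := grant.Validate(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(grant)
}
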
+func (s *Grantee) SetEmailAddress(v string) *Grantee { + s.EmailAddress = &v + return s +} + +// SetID sets the ID field's value. +func (s *Grantee) SetID(v string) *Grantee { + s.ID = &v + return s +} + +// SetType sets the Type field's value. +func (s *Grantee) SetType(v string) *Grantee { + s.Type = &v + return s +} + +// SetURI sets the URI field's value. +func (s *Grantee) SetURI(v string) *Grantee { + s.URI = &v + return s +} + +type HeadBucketInput struct { + _ struct{} `locationName:"HeadBucketRequest" type:"structure"` + + // The bucket name. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Object Lambda access points - When you use this API operation with an Object + // Lambda access point, provide the alias of the Object Lambda access point + // in place of the bucket name. If the Object Lambda access point alias in a + // request is not valid, the error code InvalidAccessPointAliasError is returned. + // For more information about InvalidAccessPointAliasError, see List of Error + // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HeadBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput { + s.Bucket = &v + return s +} + +func (s *HeadBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *HeadBucketInput) SetExpectedBucketOwner(v string) *HeadBucketInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *HeadBucketInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *HeadBucketInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s HeadBucketInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type HeadBucketOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the bucket name used in the request is an access point + // alias. + // + // This functionality is not supported for directory buckets. + AccessPointAlias *bool `location:"header" locationName:"x-amz-access-point-alias" type:"boolean"` + + // The name of the location where the bucket will be created. + // + // For directory buckets, the AZ ID of the Availability Zone where the bucket + // is created. An example AZ ID value is usw2-az1. + // + // This functionality is only supported by directory buckets. + BucketLocationName *string `location:"header" locationName:"x-amz-bucket-location-name" type:"string"` + + // The type of location where the bucket is created. + // + // This functionality is only supported by directory buckets. + BucketLocationType *string `location:"header" locationName:"x-amz-bucket-location-type" type:"string" enum:"LocationType"` + + // The Region that the bucket is located. 
+ // + // This functionality is not supported for directory buckets. + BucketRegion *string `location:"header" locationName:"x-amz-bucket-region" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadBucketOutput) GoString() string { + return s.String() +} + +// SetAccessPointAlias sets the AccessPointAlias field's value. +func (s *HeadBucketOutput) SetAccessPointAlias(v bool) *HeadBucketOutput { + s.AccessPointAlias = &v + return s +} + +// SetBucketLocationName sets the BucketLocationName field's value. +func (s *HeadBucketOutput) SetBucketLocationName(v string) *HeadBucketOutput { + s.BucketLocationName = &v + return s +} + +// SetBucketLocationType sets the BucketLocationType field's value. +func (s *HeadBucketOutput) SetBucketLocationType(v string) *HeadBucketOutput { + s.BucketLocationType = &v + return s +} + +// SetBucketRegion sets the BucketRegion field's value. +func (s *HeadBucketOutput) SetBucketRegion(v string) *HeadBucketOutput { + s.BucketRegion = &v + return s +} + +type HeadObjectInput struct { + _ struct{} `locationName:"HeadObjectRequest" type:"structure"` + + // The name of the bucket that contains the object. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
+ // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // To retrieve the checksum, this parameter must be enabled. + // + // In addition, if you enable ChecksumMode and the object is encrypted with + // Amazon Web Services Key Management Service (Amazon Web Services KMS), you + // must have permission to use the kms:Decrypt action for the request to succeed. + ChecksumMode *string `location:"header" locationName:"x-amz-checksum-mode" type:"string" enum:"ChecksumMode"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Return the object only if its entity tag (ETag) is the same as the one specified; + // otherwise, return a 412 (precondition failed) error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: + // + // * If-Match condition evaluates to true, and; + // + // * If-Unmodified-Since condition evaluates to false; + // + // Then Amazon S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time; + // otherwise, return a 304 (not modified) error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in + // the request as follows: + // + // * If-None-Match condition evaluates to false, and; + // + // * If-Modified-Since condition evaluates to true; + // + // Then Amazon S3 returns the 304 Not Modified response code. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified; otherwise, return a 304 (not modified) error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in + // the request as follows: + // + // * If-None-Match condition evaluates to false, and; + // + // * If-Modified-Since condition evaluates to true; + // + // Then Amazon S3 returns the 304 Not Modified response code. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time; + // otherwise, return a 412 (precondition failed) error. 
+ //
+ // If both of the If-Match and If-Unmodified-Since headers are present in the
+ // request as follows:
+ //
+ // * If-Match condition evaluates to true, and;
+ //
+ // * If-Unmodified-Since condition evaluates to false;
+ //
+ // Then Amazon S3 returns 200 OK and the data requested.
+ //
+ // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
+
+ // The object key.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the object being read. This is a positive integer between
+ // 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified.
+ // Useful for querying the size of the part and the number of parts in
+ // this object.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+ // HeadObject returns only the metadata for an object. If the Range is satisfiable,
+ // only the ContentLength is affected in the response. If the Range is not satisfiable,
+ // S3 returns a 416 - Requested Range Not Satisfiable error.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Sets the Cache-Control header of the response.
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
+
+ // Sets the Content-Disposition header of the response.
+ ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
+
+ // Sets the Content-Encoding header of the response.
+ ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
+
+ // Sets the Content-Language header of the response.
+ ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
+
+ // Sets the Content-Type header of the response.
+ ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
+
+ // Sets the Expires header of the response.
+ ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data.
This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + // + // This functionality is not supported for directory buckets. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by HeadObjectInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Version ID used to reference a specific version of the object. + // + // For directory buckets in this API operation, only the null value of the version + // ID is supported. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HeadObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput { + s.Bucket = &v + return s +} + +func (s *HeadObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumMode sets the ChecksumMode field's value. +func (s *HeadObjectInput) SetChecksumMode(v string) *HeadObjectInput { + s.ChecksumMode = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *HeadObjectInput) SetExpectedBucketOwner(v string) *HeadObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. 
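
The If-* fields above encode the RFC 7232 precedence rules quoted in their comments. A sketch of a conditional metadata probe built with these setters; the bucket and key are placeholders, and note that a 304 surfaces as an error from the SDK rather than as a populated output:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Conditional HEAD: ask for metadata only if the object changed in the
	// last hour. Bucket and key are placeholders.
	in := (&s3.HeadObjectInput{}).
		SetBucket("example-bucket").
		SetKey("reports/2024.csv").
		SetIfModifiedSince(time.Now().Add(-1 * time.Hour))

	out, err := svc.HeadObject(in)
	if err != nil {
		// A 304 Not Modified arrives as a request failure, not as output.
		if aerr, ok := err.(awserr.RequestFailure); ok && aerr.StatusCode() == 304 {
			fmt.Println("object unchanged; cached metadata is still valid")
			return
		}
		log.Fatal(err)
	}
	fmt.Println("size:", aws.Int64Value(out.ContentLength))
}
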
+func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { + s.RequestPayer = &v + return s +} + +// SetResponseCacheControl sets the ResponseCacheControl field's value. +func (s *HeadObjectInput) SetResponseCacheControl(v string) *HeadObjectInput { + s.ResponseCacheControl = &v + return s +} + +// SetResponseContentDisposition sets the ResponseContentDisposition field's value. +func (s *HeadObjectInput) SetResponseContentDisposition(v string) *HeadObjectInput { + s.ResponseContentDisposition = &v + return s +} + +// SetResponseContentEncoding sets the ResponseContentEncoding field's value. +func (s *HeadObjectInput) SetResponseContentEncoding(v string) *HeadObjectInput { + s.ResponseContentEncoding = &v + return s +} + +// SetResponseContentLanguage sets the ResponseContentLanguage field's value. +func (s *HeadObjectInput) SetResponseContentLanguage(v string) *HeadObjectInput { + s.ResponseContentLanguage = &v + return s +} + +// SetResponseContentType sets the ResponseContentType field's value. +func (s *HeadObjectInput) SetResponseContentType(v string) *HeadObjectInput { + s.ResponseContentType = &v + return s +} + +// SetResponseExpires sets the ResponseExpires field's value. +func (s *HeadObjectInput) SetResponseExpires(v time.Time) *HeadObjectInput { + s.ResponseExpires = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *HeadObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { + s.VersionId = &v + return s +} + +func (s *HeadObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *HeadObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s HeadObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type HeadObjectOutput struct { + _ struct{} `type:"structure"` + + // Indicates that a range of bytes was specified. + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // The archive state of the head object. + // + // This functionality is not supported for directory buckets. + ArchiveStatus *string `location:"header" locationName:"x-amz-archive-status" type:"string" enum:"ArchiveStatus"` + + // Indicates whether the object uses an S3 Bucket Key for server-side encryption + // with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. 
+ ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"`
+
+ // The base64-encoded, 256-bit SHA-256 digest of the object. This will only
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Indicates what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // Size of the body in bytes.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete
+ // Marker. If false, this response header does not appear in the response.
+ //
+ // This functionality is not supported for directory buckets.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // An entity tag (ETag) is an opaque identifier assigned by a web server to
+ // a specific version of a resource found at a URL.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured (see PutBucketLifecycleConfiguration
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)),
+ // the response includes this header. It includes the expiry-date and rule-id
+ // key-value pairs providing object expiration information. The value of the
+ // rule-id is URL-encoded.
+ //
+ // This functionality is not supported for directory buckets.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+ // Date and time when the object was last modified.
+ LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
+
+ // A map of metadata to store with the object in S3.
+ //
+ // By default, unmarshaled keys are written as map keys in the following canonicalized
+ // format: the first letter and any letter following a hyphen are capitalized,
+ // and the rest are lowercase. Set `aws.Config.LowerCaseHeaderMaps` to `true`
+ // to write unmarshaled keys to the map as lowercase.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + // + // This functionality is not supported for directory buckets. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + // Specifies whether a legal hold is in effect for this object. This header + // is only returned if the requester has the s3:GetObjectLegalHold permission. + // This header is not returned if the specified version of this object has never + // had a legal hold applied. For more information about S3 Object Lock, see + // Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + // + // This functionality is not supported for directory buckets. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode, if any, that's in effect for this object. This header + // is only returned if the requester has the s3:GetObjectRetention permission. + // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + // + // This functionality is not supported for directory buckets. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when the Object Lock retention period expires. This header + // is only returned if the requester has the s3:GetObjectRetention permission. + // + // This functionality is not supported for directory buckets. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // The count of parts this object has. This value is only returned if you specify + // partNumber in your request and the object was uploaded as a multipart upload. + PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + // Amazon S3 can return this header if your request involves a bucket that is + // either a source or a destination in a replication rule. + // + // In replication, you have a source bucket on which you configure replication + // and destination bucket or buckets where Amazon S3 stores object replicas. + // When you request an object (GetObject) or object metadata (HeadObject) from + // these buckets, Amazon S3 will return the x-amz-replication-status header + // in the response as follows: + // + // * If requesting an object from the source bucket, Amazon S3 will return + // the x-amz-replication-status header if the object in your request is eligible + // for replication. For example, suppose that in your replication configuration, + // you specify object prefix TaxDocs requesting Amazon S3 to replicate objects + // with key prefix TaxDocs. Any objects you upload with this key name prefix, + // for example TaxDocs/document1.pdf, are eligible for replication. For any + // object request with this key name prefix, Amazon S3 will return the x-amz-replication-status + // header with value PENDING, COMPLETED or FAILED indicating object replication + // status. 
+ //
+ // * If requesting an object from a destination bucket, Amazon S3 will return
+ // the x-amz-replication-status header with value REPLICA if the object in
+ // your request is a replica that Amazon S3 created and there is no replica
+ // modification replication in progress.
+ //
+ // * When replicating objects to multiple destination buckets, the x-amz-replication-status
+ // header acts differently. The header of the source object will only return
+ // a value of COMPLETED when replication is successful to all destinations.
+ // The header will remain at value PENDING until replication has completed
+ // for all destinations. If one or more destinations fail replication, the
+ // header will return FAILED.
+ //
+ // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+ //
+ // This functionality is not supported for directory buckets.
+ ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If the object is an archived object (an object whose storage class is GLACIER),
+ // the response includes this header if either the archive restoration is in
+ // progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html))
+ // or an archive copy is already restored.
+ //
+ // If an archive copy is already restored, the header value indicates when Amazon
+ // S3 is scheduled to delete the object copy. For example:
+ //
+ // x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00
+ // GMT"
+ //
+ // If the object restoration is in progress, the header returns the value ongoing-request="true".
+ //
+ // For more information about archiving objects, see Transitioning Objects:
+ // General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations).
+ //
+ // This functionality is not supported for directory buckets. Only the S3 Express
+ // One Zone storage class is supported by directory buckets to store objects.
+ Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to confirm the encryption algorithm
+ // that's used.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide the round-trip message integrity
+ // verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, indicates the ID of the Key Management Service (KMS) symmetric
+ // encryption customer managed key that was used for the object.
+ //
+ // This functionality is not supported for directory buckets.
+ // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by HeadObjectOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version ID of the object. + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetArchiveStatus sets the ArchiveStatus field's value. +func (s *HeadObjectOutput) SetArchiveStatus(v string) *HeadObjectOutput { + s.ArchiveStatus = &v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *HeadObjectOutput) SetBucketKeyEnabled(v bool) *HeadObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { + s.CacheControl = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *HeadObjectOutput) SetChecksumCRC32(v string) *HeadObjectOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. 
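
The Checksum* output fields documented above are only populated when the request opts in with x-amz-checksum-mode: ENABLED. A sketch, with placeholder bucket and key, of pairing HeadObjectInput.ChecksumMode with these fields:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Without ChecksumMode the Checksum* members stay nil; with it, S3 echoes
	// whichever checksum(s) were stored with the object.
	out, err := svc.HeadObject((&s3.HeadObjectInput{}).
		SetBucket("example-bucket"). // placeholder
		SetKey("data.bin").          // placeholder
		SetChecksumMode(s3.ChecksumModeEnabled))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("crc32c:", aws.StringValue(out.ChecksumCRC32C))
	fmt.Println("sha256:", aws.StringValue(out.ChecksumSHA256))
}
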
+func (s *HeadObjectOutput) SetChecksumCRC32C(v string) *HeadObjectOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *HeadObjectOutput) SetChecksumSHA1(v string) *HeadObjectOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *HeadObjectOutput) SetChecksumSHA256(v string) *HeadObjectOutput { + s.ChecksumSHA256 = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { + s.MissingMeta = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *HeadObjectOutput) SetObjectLockLegalHoldStatus(v string) *HeadObjectOutput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *HeadObjectOutput) SetObjectLockMode(v string) *HeadObjectOutput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *HeadObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *HeadObjectOutput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. 
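
The x-amz-replication-status values enumerated in the ReplicationStatus documentation above (PENDING, COMPLETED, FAILED, REPLICA) lend themselves to a simple dispatch. A sketch that maps them to summaries; the strings are taken from that documentation, and an absent header means no replication rule applies:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// describeReplication maps the header values listed in the field
// documentation above to short human-readable summaries.
func describeReplication(out *s3.HeadObjectOutput) string {
	switch aws.StringValue(out.ReplicationStatus) {
	case "PENDING":
		return "replication not yet complete for at least one destination"
	case "COMPLETED":
		return "replication succeeded for all destinations"
	case "FAILED":
		return "replication failed for one or more destinations"
	case "REPLICA":
		return "object is a replica created by Amazon S3"
	default:
		return "no replication status reported"
	}
}

func main() {
	// Stand-in output; real code would use the result of a HeadObject call.
	out := &s3.HeadObjectOutput{ReplicationStatus: aws.String("COMPLETED")}
	fmt.Println(describeReplication(out))
}
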
+func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +// Container for the Suffix element. +type IndexDocument struct { + _ struct{} `type:"structure"` + + // A suffix that is appended to a request that is for a directory on the website + // endpoint. (For example, if the suffix is index.html and you make a request + // to samplebucket/images/, the data that is returned will be for the object + // with the key name images/index.html.) The suffix must not be empty and must + // not include a slash character. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Suffix is a required field + Suffix *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IndexDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IndexDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IndexDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} + if s.Suffix == nil { + invalidParams.Add(request.NewErrParamRequired("Suffix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSuffix sets the Suffix field's value. 
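
IndexDocument above carries a single required Suffix. A sketch of wiring it into a bucket website configuration; the bucket name is a placeholder, and note that Validate only checks presence, so the non-empty/no-slash rules are enforced by the service:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Per the Suffix documentation above, "index.html" turns a request for
	// samplebucket/images/ into one for images/index.html.
	idx := (&s3.IndexDocument{}).SetSuffix("index.html")
	if err := idx.Validate(); err != nil {
		log.Fatal(err)
	}

	if _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
		Bucket:               aws.String("example-bucket"), // placeholder
		WebsiteConfiguration: &s3.WebsiteConfiguration{IndexDocument: idx},
	}); err != nil {
		log.Fatal(err)
	}
}
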
+func (s *IndexDocument) SetSuffix(v string) *IndexDocument { + s.Suffix = &v + return s +} + +// Container element that identifies who initiated the multipart upload. +type Initiator struct { + _ struct{} `type:"structure"` + + // Name of the Principal. + // + // This functionality is not supported for directory buckets. + DisplayName *string `type:"string"` + + // If the principal is an Amazon Web Services account, it provides the Canonical + // User ID. If the principal is an IAM User, it provides a user ARN value. + // + // Directory buckets - If the principal is an Amazon Web Services account, it + // provides the Amazon Web Services account ID. If the principal is an IAM User, + // it provides a user ARN value. + ID *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Initiator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Initiator) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Initiator) SetDisplayName(v string) *Initiator { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. +func (s *Initiator) SetID(v string) *Initiator { + s.ID = &v + return s +} + +// Describes the serialization format of the object. +type InputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of a CSV-encoded object. + CSV *CSVInput `type:"structure"` + + // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default + // Value: NONE. + CompressionType *string `type:"string" enum:"CompressionType"` + + // Specifies JSON as object's input serialization format. + JSON *JSONInput `type:"structure"` + + // Specifies Parquet as object's input serialization format. + Parquet *ParquetInput `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *InputSerialization) SetCSV(v *CSVInput) *InputSerialization { + s.CSV = v + return s +} + +// SetCompressionType sets the CompressionType field's value. +func (s *InputSerialization) SetCompressionType(v string) *InputSerialization { + s.CompressionType = &v + return s +} + +// SetJSON sets the JSON field's value. +func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization { + s.JSON = v + return s +} + +// SetParquet sets the Parquet field's value. 
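
InputSerialization above is the input half of an S3 Select request: exactly one of CSV, JSON, or Parquet describes the stored format, with CompressionType as a modifier. A sketch of a gzip-compressed CSV configuration; the delimiter and header handling are illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// CompressionType describes how the object is stored (NONE, GZIP, or
	// BZIP2 per the documentation above).
	input := (&s3.InputSerialization{}).
		SetCompressionType(s3.CompressionTypeGzip).
		SetCSV((&s3.CSVInput{}).
			SetFileHeaderInfo(s3.FileHeaderInfoUse). // treat the first row as headers
			SetFieldDelimiter(","))

	// A caller would place this in SelectObjectContentInput.InputSerialization
	// next to an SQL expression and a matching OutputSerialization.
	fmt.Println(input)
}
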
+func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization { + s.Parquet = v + return s +} + +// A container for specifying S3 Intelligent-Tiering filters. The filters determine +// the subset of objects to which the rule applies. +type IntelligentTieringAndOperator struct { + _ struct{} `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the configuration applies. + Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the configuration + // to apply. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IntelligentTieringAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IntelligentTieringAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *IntelligentTieringAndOperator) SetPrefix(v string) *IntelligentTieringAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *IntelligentTieringAndOperator) SetTags(v []*Tag) *IntelligentTieringAndOperator { + s.Tags = v + return s +} + +// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. +// +// For information about the S3 Intelligent-Tiering storage class, see Storage +// class for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +type IntelligentTieringConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a bucket filter. The configuration only includes objects that meet + // the filter's criteria. + Filter *IntelligentTieringFilter `type:"structure"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Specifies the status of the configuration. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"IntelligentTieringStatus"` + + // Specifies the S3 Intelligent-Tiering storage class tier of the configuration. + // + // Tierings is a required field + Tierings []*Tiering `locationName:"Tiering" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
+// The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IntelligentTieringConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IntelligentTieringConfiguration) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *IntelligentTieringConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringConfiguration"}
+	if s.Id == nil {
+		invalidParams.Add(request.NewErrParamRequired("Id"))
+	}
+	if s.Status == nil {
+		invalidParams.Add(request.NewErrParamRequired("Status"))
+	}
+	if s.Tierings == nil {
+		invalidParams.Add(request.NewErrParamRequired("Tierings"))
+	}
+	if s.Filter != nil {
+		if err := s.Filter.Validate(); err != nil {
+			invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Tierings != nil {
+		for i, v := range s.Tierings {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tierings", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetFilter sets the Filter field's value.
+func (s *IntelligentTieringConfiguration) SetFilter(v *IntelligentTieringFilter) *IntelligentTieringConfiguration {
+	s.Filter = v
+	return s
+}
+
+// SetId sets the Id field's value.
+func (s *IntelligentTieringConfiguration) SetId(v string) *IntelligentTieringConfiguration {
+	s.Id = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *IntelligentTieringConfiguration) SetStatus(v string) *IntelligentTieringConfiguration {
+	s.Status = &v
+	return s
+}
+
+// SetTierings sets the Tierings field's value.
+func (s *IntelligentTieringConfiguration) SetTierings(v []*Tiering) *IntelligentTieringConfiguration {
+	s.Tierings = v
+	return s
+}
+
+// The Filter is used to identify objects that the S3 Intelligent-Tiering configuration
+// applies to.
+type IntelligentTieringFilter struct {
+	_ struct{} `type:"structure"`
+
+	// A conjunction (logical AND) of predicates, which is used in evaluating a
+	// metrics filter. The operator must have at least two predicates, and an object
+	// must match all of the predicates in order for the filter to apply.
+	And *IntelligentTieringAndOperator `type:"structure"`
+
+	// An object key name prefix that identifies the subset of objects to which
+	// the rule applies.
+	//
+	// Replacement must be made for object keys containing special characters (such
+	// as carriage returns) when using XML requests. For more information, see XML
+	// related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+	Prefix *string `type:"string"`
+
+	// A container of a key value name pair.
+	Tag *Tag `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
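+//
+// Editor's illustration (not generated upstream text; the Tiering setters and
+// access-tier constant are assumed from the same generated package): a filter
+// feeds into a full configuration like so:
+//
+//	cfg := new(s3.IntelligentTieringConfiguration).
+//		SetId("archive-logs").
+//		SetStatus(s3.IntelligentTieringStatusEnabled).
+//		SetFilter(new(s3.IntelligentTieringFilter).SetPrefix("logs/")).
+//		SetTierings([]*s3.Tiering{
+//			new(s3.Tiering).SetDays(90).SetAccessTier(s3.IntelligentTieringAccessTierArchiveAccess),
+//		})
+//	// cfg.Validate() reports any missing required fields (Id, Status, Tierings).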
+func (s IntelligentTieringFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IntelligentTieringFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *IntelligentTieringFilter) SetAnd(v *IntelligentTieringAndOperator) *IntelligentTieringFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *IntelligentTieringFilter) SetPrefix(v string) *IntelligentTieringFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *IntelligentTieringFilter) SetTag(v *Tag) *IntelligentTieringFilter { + s.Tag = v + return s +} + +// Specifies the inventory configuration for an Amazon S3 bucket. For more information, +// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) +// in the Amazon S3 API Reference. +type InventoryConfiguration struct { + _ struct{} `type:"structure"` + + // Contains information about where to publish the inventory results. + // + // Destination is a required field + Destination *InventoryDestination `type:"structure" required:"true"` + + // Specifies an inventory filter. The inventory only includes objects that meet + // the filter's criteria. + Filter *InventoryFilter `type:"structure"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Object versions to include in the inventory list. If set to All, the list + // includes all the object versions, which adds the version-related fields VersionId, + // IsLatest, and DeleteMarker to the list. If set to Current, the list does + // not contain these version-related fields. + // + // IncludedObjectVersions is a required field + IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"` + + // Specifies whether the inventory is enabled or disabled. If set to True, an + // inventory list is generated. If set to False, no inventory list is generated. + // + // IsEnabled is a required field + IsEnabled *bool `type:"boolean" required:"true"` + + // Contains the optional fields that are included in the inventory results. + OptionalFields []*string `locationNameList:"Field" type:"list" enum:"InventoryOptionalField"` + + // Specifies the schedule for generating inventory results. + // + // Schedule is a required field + Schedule *InventorySchedule `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
+// The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InventoryConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InventoryConfiguration) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"}
+	if s.Destination == nil {
+		invalidParams.Add(request.NewErrParamRequired("Destination"))
+	}
+	if s.Id == nil {
+		invalidParams.Add(request.NewErrParamRequired("Id"))
+	}
+	if s.IncludedObjectVersions == nil {
+		invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions"))
+	}
+	if s.IsEnabled == nil {
+		invalidParams.Add(request.NewErrParamRequired("IsEnabled"))
+	}
+	if s.Schedule == nil {
+		invalidParams.Add(request.NewErrParamRequired("Schedule"))
+	}
+	if s.Destination != nil {
+		if err := s.Destination.Validate(); err != nil {
+			invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Filter != nil {
+		if err := s.Filter.Validate(); err != nil {
+			invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Schedule != nil {
+		if err := s.Schedule.Validate(); err != nil {
+			invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDestination sets the Destination field's value.
+func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration {
+	s.Destination = v
+	return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration {
+	s.Filter = v
+	return s
+}
+
+// SetId sets the Id field's value.
+func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration {
+	s.Id = &v
+	return s
+}
+
+// SetIncludedObjectVersions sets the IncludedObjectVersions field's value.
+func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration {
+	s.IncludedObjectVersions = &v
+	return s
+}
+
+// SetIsEnabled sets the IsEnabled field's value.
+func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration {
+	s.IsEnabled = &v
+	return s
+}
+
+// SetOptionalFields sets the OptionalFields field's value.
+func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration {
+	s.OptionalFields = v
+	return s
+}
+
+// SetSchedule sets the Schedule field's value.
+func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration {
+	s.Schedule = v
+	return s
+}
+
+// Specifies the inventory configuration for an Amazon S3 bucket.
+type InventoryDestination struct {
+	_ struct{} `type:"structure"`
+
+	// Contains the bucket name, file format, bucket owner (optional), and prefix
+	// (optional) where inventory results are published.
+	//
+	// S3BucketDestination is a required field
+	S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output.
+// The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InventoryDestination) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InventoryDestination) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryDestination) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"}
+	if s.S3BucketDestination == nil {
+		invalidParams.Add(request.NewErrParamRequired("S3BucketDestination"))
+	}
+	if s.S3BucketDestination != nil {
+		if err := s.S3BucketDestination.Validate(); err != nil {
+			invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetS3BucketDestination sets the S3BucketDestination field's value.
+func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination {
+	s.S3BucketDestination = v
+	return s
+}
+
+// Contains the type of server-side encryption used to encrypt the inventory
+// results.
+type InventoryEncryption struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the use of SSE-KMS to encrypt delivered inventory reports.
+	SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"`
+
+	// Specifies the use of SSE-S3 to encrypt delivered inventory reports.
+	SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InventoryEncryption) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InventoryEncryption) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryEncryption) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "InventoryEncryption"}
+	if s.SSEKMS != nil {
+		if err := s.SSEKMS.Validate(); err != nil {
+			invalidParams.AddNested("SSEKMS", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetSSEKMS sets the SSEKMS field's value.
+func (s *InventoryEncryption) SetSSEKMS(v *SSEKMS) *InventoryEncryption {
+	s.SSEKMS = v
+	return s
+}
+
+// SetSSES3 sets the SSES3 field's value.
+func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption {
+	s.SSES3 = v
+	return s
+}
+
+// Specifies an inventory filter. The inventory only includes objects that meet
+// the filter's criteria.
+type InventoryFilter struct {
+	_ struct{} `type:"structure"`
+
+	// The prefix that an object must have to be included in the inventory results.
+	//
+	// Prefix is a required field
+	Prefix *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { + s.Prefix = &v + return s +} + +// Contains the bucket name, file format, bucket owner (optional), and prefix +// (optional) where inventory results are published. +type InventoryS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The account ID that owns the destination S3 bucket. If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. + AccountId *string `type:"string"` + + // The Amazon Resource Name (ARN) of the bucket where inventory results will + // be published. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // Contains the type of server-side encryption used to encrypt the inventory + // results. + Encryption *InventoryEncryption `type:"structure"` + + // Specifies the output format of the inventory results. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"InventoryFormat"` + + // The prefix that is prepended to all inventory results. + Prefix *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
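+//
+// Editor's illustration (not generated upstream text): validation passes once
+// the required Bucket ARN and Format are set; the bucket ARN below is a
+// placeholder.
+//
+//	dest := new(s3.InventoryS3BucketDestination).
+//		SetBucket("arn:aws:s3:::example-inventory-reports").
+//		SetFormat(s3.InventoryFormatCsv).
+//		SetPrefix("inventory/")
+//	// dest.Validate() == nil; wire it into the configuration with:
+//	_ = new(s3.InventoryDestination).SetS3BucketDestination(dest)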
+func (s *InventoryS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination { + s.Bucket = &v + return s +} + +func (s *InventoryS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryption sets the Encryption field's value. +func (s *InventoryS3BucketDestination) SetEncryption(v *InventoryEncryption) *InventoryS3BucketDestination { + s.Encryption = v + return s +} + +// SetFormat sets the Format field's value. +func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination { + s.Prefix = &v + return s +} + +// Specifies the schedule for generating inventory results. +type InventorySchedule struct { + _ struct{} `type:"structure"` + + // Specifies how frequently inventory results are produced. + // + // Frequency is a required field + Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventorySchedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventorySchedule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventorySchedule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"} + if s.Frequency == nil { + invalidParams.Add(request.NewErrParamRequired("Frequency")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFrequency sets the Frequency field's value. +func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { + s.Frequency = &v + return s +} + +// Specifies JSON as object's input serialization format. +type JSONInput struct { + _ struct{} `type:"structure"` + + // The type of JSON. Valid values: Document, Lines. + Type *string `type:"string" enum:"JSONType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
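+//
+// Editor's illustration (not generated upstream text; OutputSerialization is
+// assumed from the same generated package): selecting over newline-delimited
+// JSON pairs this type on the input side with JSONOutput on the output side:
+//
+//	in := new(s3.InputSerialization).
+//		SetJSON(new(s3.JSONInput).SetType(s3.JSONTypeLines))
+//	out := new(s3.OutputSerialization).
+//		SetJSON(new(s3.JSONOutput).SetRecordDelimiter("\n"))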
+func (s JSONInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s JSONInput) GoString() string { + return s.String() +} + +// SetType sets the Type field's value. +func (s *JSONInput) SetType(v string) *JSONInput { + s.Type = &v + return s +} + +// Specifies JSON as request's output serialization format. +type JSONOutput struct { + _ struct{} `type:"structure"` + + // The value used to separate individual records in the output. If no value + // is specified, Amazon S3 uses a newline character ('\n'). + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s JSONOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s JSONOutput) GoString() string { + return s.String() +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput { + s.RecordDelimiter = &v + return s +} + +// A container for object key name prefix and suffix filtering rules. +type KeyFilter struct { + _ struct{} `type:"structure"` + + // A list of containers for the key-value pair that defines the criteria for + // the filter rule. + FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KeyFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s KeyFilter) GoString() string { + return s.String() +} + +// SetFilterRules sets the FilterRules field's value. +func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { + s.FilterRules = v + return s +} + +// A container for specifying the configuration for Lambda notifications. +type LambdaFunctionConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket event for which to invoke the Lambda function. For more + // information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon S3 User Guide. + // + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` + + // Specifies object key name filtering rules. 
+	// For information about key name
+	// filtering, see Configuring event notifications using object key name filtering
+	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html)
+	// in the Amazon S3 User Guide.
+	Filter *NotificationConfigurationFilter `type:"structure"`
+
+	// An optional unique identifier for configurations in a notification configuration.
+	// If you don't provide one, Amazon S3 will assign an ID.
+	Id *string `type:"string"`
+
+	// The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes
+	// when the specified event type occurs.
+	//
+	// LambdaFunctionArn is a required field
+	LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LambdaFunctionConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LambdaFunctionConfiguration) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LambdaFunctionConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"}
+	if s.Events == nil {
+		invalidParams.Add(request.NewErrParamRequired("Events"))
+	}
+	if s.LambdaFunctionArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetEvents sets the Events field's value.
+func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration {
+	s.Events = v
+	return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration {
+	s.Filter = v
+	return s
+}
+
+// SetId sets the Id field's value.
+func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration {
+	s.Id = &v
+	return s
+}
+
+// SetLambdaFunctionArn sets the LambdaFunctionArn field's value.
+func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration {
+	s.LambdaFunctionArn = &v
+	return s
+}
+
+// Container for lifecycle rules. You can add as many as 1000 rules.
+//
+// For more information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html)
+// in the Amazon S3 User Guide.
+type LifecycleConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies lifecycle configuration rules for an Amazon S3 bucket.
+	//
+	// Rules is a required field
+	Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { + s.Rules = v + return s +} + +// Container for the expiration for the lifecycle of the object. +// +// For more information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) +// in the Amazon S3 User Guide. +type LifecycleExpiration struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. The date value + // must conform to the ISO 8601 format. The time is always midnight UTC. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // Indicates whether Amazon S3 will remove a delete marker with no noncurrent + // versions. If set to true, the delete marker will be expired; if set to false + // the policy takes no action. This cannot be specified with Days or Date in + // a Lifecycle Expiration Policy. + ExpiredObjectDeleteMarker *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LifecycleExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LifecycleExpiration) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration { + s.Days = &v + return s +} + +// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value. +func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration { + s.ExpiredObjectDeleteMarker = &v + return s +} + +// A lifecycle rule for individual objects in an Amazon S3 bucket. +// +// For more information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) +// in the Amazon S3 User Guide. 
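+//
+// Editor's sketch (not generated upstream text): a rule that expires objects
+// under a prefix after 30 days can be assembled from the types below:
+//
+//	rule := new(s3.LifecycleRule).
+//		SetID("expire-tmp").
+//		SetStatus(s3.ExpirationStatusEnabled).
+//		SetFilter(new(s3.LifecycleRuleFilter).SetPrefix("tmp/")).
+//		SetExpiration(new(s3.LifecycleExpiration).SetDays(30))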
+type LifecycleRule struct { + _ struct{} `type:"structure"` + + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 will wait before permanently removing all parts of the upload. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon S3 User Guide. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + + // Specifies the expiration for the lifecycle of the object in the form of date, + // days and, whether the object has a delete marker. + Expiration *LifecycleExpiration `type:"structure"` + + // The Filter is used to identify objects that a Lifecycle Rule applies to. + // A Filter must have exactly one of Prefix, Tag, or And specified. Filter is + // required if the LifecycleRule does not contain a Prefix element. + Filter *LifecycleRuleFilter `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Specifies the transition rule for the lifecycle rule that describes when + // noncurrent objects transition to a specific storage class. If your bucket + // is versioning-enabled (or versioning is suspended), you can set this action + // to request that Amazon S3 transition noncurrent object versions to a specific + // storage class at a set period in the object's lifetime. + NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` + + // Prefix identifying one or more objects to which the rule applies. This is + // no longer used; use Filter instead. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Deprecated: Prefix has been deprecated + Prefix *string `deprecated:"true" type:"string"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + // Specifies when an Amazon S3 object transitions to a specified storage class. + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LifecycleRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
+// The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleRule) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleRule) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"}
+	if s.Status == nil {
+		invalidParams.Add(request.NewErrParamRequired("Status"))
+	}
+	if s.Filter != nil {
+		if err := s.Filter.Validate(); err != nil {
+			invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value.
+func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule {
+	s.AbortIncompleteMultipartUpload = v
+	return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule {
+	s.Expiration = v
+	return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule {
+	s.Filter = v
+	return s
+}
+
+// SetID sets the ID field's value.
+func (s *LifecycleRule) SetID(v string) *LifecycleRule {
+	s.ID = &v
+	return s
+}
+
+// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value.
+func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule {
+	s.NoncurrentVersionExpiration = v
+	return s
+}
+
+// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value.
+func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule {
+	s.NoncurrentVersionTransitions = v
+	return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule {
+	s.Prefix = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *LifecycleRule) SetStatus(v string) *LifecycleRule {
+	s.Status = &v
+	return s
+}
+
+// SetTransitions sets the Transitions field's value.
+func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule {
+	s.Transitions = v
+	return s
+}
+
+// This is used in a Lifecycle Rule Filter to apply a logical AND to two or
+// more predicates. The Lifecycle Rule will apply to any object matching all
+// of the predicates configured inside the And operator.
+type LifecycleRuleAndOperator struct {
+	_ struct{} `type:"structure"`
+
+	// Minimum object size to which the rule applies.
+	ObjectSizeGreaterThan *int64 `type:"long"`
+
+	// Maximum object size to which the rule applies.
+	ObjectSizeLessThan *int64 `type:"long"`
+
+	// Prefix identifying one or more objects to which the rule applies.
+	Prefix *string `type:"string"`
+
+	// All of these tags must exist in the object's tag set in order for the rule
+	// to apply.
+	Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleRuleAndOperator) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LifecycleRuleAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjectSizeGreaterThan sets the ObjectSizeGreaterThan field's value. +func (s *LifecycleRuleAndOperator) SetObjectSizeGreaterThan(v int64) *LifecycleRuleAndOperator { + s.ObjectSizeGreaterThan = &v + return s +} + +// SetObjectSizeLessThan sets the ObjectSizeLessThan field's value. +func (s *LifecycleRuleAndOperator) SetObjectSizeLessThan(v int64) *LifecycleRuleAndOperator { + s.ObjectSizeLessThan = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { + s.Tags = v + return s +} + +// The Filter is used to identify objects that a Lifecycle Rule applies to. +// A Filter can have exactly one of Prefix, Tag, ObjectSizeGreaterThan, ObjectSizeLessThan, +// or And specified. If the Filter element is left empty, the Lifecycle Rule +// applies to all objects in the bucket. +type LifecycleRuleFilter struct { + _ struct{} `type:"structure"` + + // This is used in a Lifecycle Rule Filter to apply a logical AND to two or + // more predicates. The Lifecycle Rule will apply to any object matching all + // of the predicates configured inside the And operator. + And *LifecycleRuleAndOperator `type:"structure"` + + // Minimum object size to which the rule applies. + ObjectSizeGreaterThan *int64 `type:"long"` + + // Maximum object size to which the rule applies. + ObjectSizeLessThan *int64 `type:"long"` + + // Prefix identifying one or more objects to which the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + Prefix *string `type:"string"` + + // This tag must exist in the object's tag set in order for the rule to apply. + Tag *Tag `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LifecycleRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
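+//
+// Editor's illustration (not generated upstream text): because at most one
+// predicate may be set directly, combining a prefix with a maximum object
+// size requires the And operator:
+//
+//	filter := new(s3.LifecycleRuleFilter).SetAnd(
+//		new(s3.LifecycleRuleAndOperator).
+//			SetPrefix("logs/").
+//			SetObjectSizeLessThan(1024 * 1024),
+//	)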
+func (s LifecycleRuleFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter { + s.And = v + return s +} + +// SetObjectSizeGreaterThan sets the ObjectSizeGreaterThan field's value. +func (s *LifecycleRuleFilter) SetObjectSizeGreaterThan(v int64) *LifecycleRuleFilter { + s.ObjectSizeGreaterThan = &v + return s +} + +// SetObjectSizeLessThan sets the ObjectSizeLessThan field's value. +func (s *LifecycleRuleFilter) SetObjectSizeLessThan(v int64) *LifecycleRuleFilter { + s.ObjectSizeLessThan = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { + s.Tag = v + return s +} + +type ListBucketAnalyticsConfigurationsInput struct { + _ struct{} `locationName:"ListBucketAnalyticsConfigurationsRequest" type:"structure"` + + // The name of the bucket from which analytics configurations are retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketAnalyticsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketAnalyticsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketAnalyticsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketAnalyticsConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *ListBucketAnalyticsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketAnalyticsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketAnalyticsConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketAnalyticsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The list of analytics configurations for a bucket. + AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` + + // The marker that is used as a starting point for this analytics configuration + // list response. This value is present if it was sent in the request. + ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // NextContinuationToken is sent when isTruncated is true, which indicates that + // there are more analytics configurations to list. The next request must include + // this NextContinuationToken. The token is obfuscated and is not a usable value. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
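+//
+// Editor's illustration (not generated upstream text): this listing is
+// paginated by hand, feeding NextContinuationToken back in while IsTruncated
+// is true (svc is an assumed *s3.S3 client):
+//
+//	var token *string
+//	for {
+//		out, err := svc.ListBucketAnalyticsConfigurations(&s3.ListBucketAnalyticsConfigurationsInput{
+//			Bucket:            aws.String("example-bucket"),
+//			ContinuationToken: token,
+//		})
+//		if err != nil {
+//			break // handle the error in real code
+//		}
+//		if !aws.BoolValue(out.IsTruncated) {
+//			break
+//		}
+//		token = out.NextContinuationToken
+//	}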
+func (s ListBucketAnalyticsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput { + s.AnalyticsConfigurationList = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketIntelligentTieringConfigurationsInput struct { + _ struct{} `locationName:"ListBucketIntelligentTieringConfigurationsRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketIntelligentTieringConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketIntelligentTieringConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketIntelligentTieringConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketIntelligentTieringConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *ListBucketIntelligentTieringConfigurationsInput) SetBucket(v string) *ListBucketIntelligentTieringConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsInput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsInput { + s.ContinuationToken = &v + return s +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketIntelligentTieringConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketIntelligentTieringConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `type:"string"` + + // The list of S3 Intelligent-Tiering configurations for a bucket. + IntelligentTieringConfigurationList []*IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"list" flattened:"true"` + + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketIntelligentTieringConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketIntelligentTieringConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. 
+func (s *ListBucketIntelligentTieringConfigurationsOutput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIntelligentTieringConfigurationList sets the IntelligentTieringConfigurationList field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIntelligentTieringConfigurationList(v []*IntelligentTieringConfiguration) *ListBucketIntelligentTieringConfigurationsOutput { + s.IntelligentTieringConfigurationList = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIsTruncated(v bool) *ListBucketIntelligentTieringConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketInventoryConfigurationsInput struct { + _ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"` + + // The name of the bucket containing the inventory configurations to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker used to continue an inventory configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketInventoryConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketInventoryConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketInventoryConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
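+//
+// Editor's illustration (not generated upstream text): the input is usually
+// built with just the bucket, optionally pinning the expected owner account
+// (the account ID below is a placeholder):
+//
+//	in := new(s3.ListBucketInventoryConfigurationsInput).
+//		SetBucket("example-bucket").
+//		SetExpectedBucketOwner("111122223333")
+//	// in.Validate() catches a missing or empty Bucket before the request is sent.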
+func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketInventoryConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListBucketInventoryConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketInventoryConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *ListBucketInventoryConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketInventoryConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketInventoryConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketInventoryConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // If sent in the request, the marker that is used as a starting point for this + // inventory configuration list response. + ContinuationToken *string `type:"string"` + + // The list of inventory configurations for a bucket. + InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` + + // Tells whether the returned list of inventory configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // is provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketInventoryConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketInventoryConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. 
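+//
+// An illustrative note on updateArnableField above (not part of the generated
+// API): it uses a value receiver, so the resource name parsed from an ARN is
+// written into a copy and the caller's input is left untouched. Roughly,
+// inside this package, with a hypothetical access point ARN:
+//
+//	in := &ListBucketInventoryConfigurationsInput{
+//		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap"), // hypothetical ARN
+//	}
+//	out, _ := in.updateArnableField("my-ap")
+//	// in.Bucket still holds the ARN; the returned copy's Bucket is "my-ap".
+//	_ = out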
+func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetInventoryConfigurationList sets the InventoryConfigurationList field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput { + s.InventoryConfigurationList = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketMetricsConfigurationsInput struct { + _ struct{} `locationName:"ListBucketMetricsConfigurationsRequest" type:"structure"` + + // The name of the bucket containing the metrics configurations to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker that is used to continue a metrics configuration listing that + // has been truncated. Use the NextContinuationToken from a previously truncated + // list response to continue the listing. The continuation token is an opaque + // value that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketMetricsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketMetricsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketMetricsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
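+//
+// As an illustrative sketch only (not part of the generated API): Bucket may
+// carry an access point ARN instead of a bucket name, which the hasEndpointARN
+// helper below detects via arn.IsARN. With a hypothetical ARN:
+//
+//	input := &ListBucketMetricsConfigurationsInput{
+//		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap"), // hypothetical ARN
+//	}
+//	_ = input.hasEndpointARN() // reports true for ARN-shaped bucket values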
+func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketMetricsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListBucketMetricsConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketMetricsConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *ListBucketMetricsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketMetricsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketMetricsConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketMetricsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The marker that is used as a starting point for this metrics configuration + // list response. This value is present if it was sent in the request. + ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of metrics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The list of metrics configurations for a bucket. + MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"` + + // The marker used to continue a metrics configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketMetricsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketMetricsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. 
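+//
+// As an illustrative sketch only (not part of the generated API): the input
+// documentation above notes that a mismatched ExpectedBucketOwner fails the
+// request with HTTP 403. A caller might detect that case roughly as follows,
+// where svc is an assumed *s3.S3 client and the bucket and account ID are
+// placeholders:
+//
+//	_, err := svc.ListBucketMetricsConfigurations(&s3.ListBucketMetricsConfigurationsInput{
+//		Bucket:              aws.String("example-bucket"),
+//		ExpectedBucketOwner: aws.String("111122223333"),
+//	})
+//	if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 403 {
+//		// access denied: wrong bucket owner
+//	}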
+func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetMetricsConfigurationList sets the MetricsConfigurationList field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput { + s.MetricsConfigurationList = v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketsInput) GoString() string { + return s.String() +} + +type ListBucketsOutput struct { + _ struct{} `type:"structure"` + + // The list of buckets owned by the requester. + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + // The owner of the buckets listed. + Owner *Owner `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketsOutput) GoString() string { + return s.String() +} + +// SetBuckets sets the Buckets field's value. +func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput { + s.Buckets = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { + s.Owner = v + return s +} + +type ListDirectoryBucketsInput struct { + _ struct{} `locationName:"ListDirectoryBucketsRequest" type:"structure"` + + // ContinuationToken indicates to Amazon S3 that the list is being continued + // on this bucket with a token. ContinuationToken is obfuscated and is not a + // real key. You can use this ContinuationToken for pagination of the list results. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // Maximum number of buckets to be returned in response. When the number is + // more than the count of buckets that are owned by an Amazon Web Services account, + // return all the buckets in response. 
+ MaxDirectoryBuckets *int64 `location:"querystring" locationName:"max-directory-buckets" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDirectoryBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDirectoryBucketsInput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListDirectoryBucketsInput) SetContinuationToken(v string) *ListDirectoryBucketsInput { + s.ContinuationToken = &v + return s +} + +// SetMaxDirectoryBuckets sets the MaxDirectoryBuckets field's value. +func (s *ListDirectoryBucketsInput) SetMaxDirectoryBuckets(v int64) *ListDirectoryBucketsInput { + s.MaxDirectoryBuckets = &v + return s +} + +type ListDirectoryBucketsOutput struct { + _ struct{} `type:"structure"` + + // The list of buckets owned by the requester. + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + // If ContinuationToken was sent with the request, it is included in the response. + // You can use the returned ContinuationToken for pagination of the list response. + ContinuationToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDirectoryBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDirectoryBucketsOutput) GoString() string { + return s.String() +} + +// SetBuckets sets the Buckets field's value. +func (s *ListDirectoryBucketsOutput) SetBuckets(v []*Bucket) *ListDirectoryBucketsOutput { + s.Buckets = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListDirectoryBucketsOutput) SetContinuationToken(v string) *ListDirectoryBucketsOutput { + s.ContinuationToken = &v + return s +} + +type ListMultipartUploadsInput struct { + _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` + + // The name of the bucket to which the multipart upload was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. 
+ // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group keys. + // + // All keys that contain the same string between the prefix, if specified, and + // the first occurrence of the delimiter after the prefix are grouped under + // a single result element, CommonPrefixes. If you don't specify the prefix + // parameter, then the substring starts at the beginning of the key. The keys + // that are grouped under CommonPrefixes result element are not returned elsewhere + // in the response. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key can contain any Unicode character; + // however, the XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Specifies the multipart upload after which listing should begin. + // + // * General purpose buckets - For general purpose buckets, key-marker is + // an object key. Together with upload-id-marker, this parameter specifies + // the multipart upload after which listing should begin. If upload-id-marker + // is not specified, only the keys lexicographically greater than the specified + // key-marker will be included in the list. 
If upload-id-marker is specified, + // any multipart uploads for a key equal to the key-marker might also be + // included, provided those multipart uploads have upload IDs lexicographically + // greater than the specified upload-id-marker. + // + // * Directory buckets - For directory buckets, key-marker is obfuscated + // and isn't a real object key. The upload-id-marker parameter isn't supported + // by directory buckets. To list the additional multipart uploads, you only + // need to set the value of key-marker to the NextKeyMarker value from the + // previous response. In the ListMultipartUploads response, the multipart + // uploads aren't sorted lexicographically based on the object keys. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different grouping + // of keys. (You can think of using prefix to make groups in the same way that + // you'd use a folder in a file system.) + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. + // + // This functionality is not supported for directory buckets. + UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListMultipartUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
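+//
+// As an illustrative sketch only (not part of the generated API): for general
+// purpose buckets, the KeyMarker/UploadIdMarker pagination described above can
+// be driven from the NextKeyMarker and NextUploadIdMarker response fields. A
+// rough manual loop, where svc is an assumed *s3.S3 client:
+//
+//	in := &s3.ListMultipartUploadsInput{Bucket: aws.String("example-bucket")} // placeholder bucket
+//	for {
+//		out, err := svc.ListMultipartUploads(in)
+//		if err != nil {
+//			return err
+//		}
+//		// ... inspect out.Uploads ...
+//		if out.IsTruncated == nil || !*out.IsTruncated {
+//			break
+//		}
+//		in.KeyMarker, in.UploadIdMarker = out.NextKeyMarker, out.NextUploadIdMarker
+//	}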
+func (s ListMultipartUploadsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMultipartUploadsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput { + s.Bucket = &v + return s +} + +func (s *ListMultipartUploadsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListMultipartUploadsInput) SetExpectedBucketOwner(v string) *ListMultipartUploadsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput { + s.MaxUploads = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListMultipartUploadsInput) SetRequestPayer(v string) *ListMultipartUploadsInput { + s.RequestPayer = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput { + s.UploadIdMarker = &v + return s +} + +func (s *ListMultipartUploadsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListMultipartUploadsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListMultipartUploadsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListMultipartUploadsOutput struct { + _ struct{} `type:"structure"` + + // The name of the bucket to which the multipart upload was initiated. Does + // not return the access point ARN or access point alias if used. 
+ Bucket *string `type:"string"` + + // If you specify a delimiter in the request, then the result returns each distinct + // key prefix containing the delimiter in a CommonPrefixes element. The distinct + // key prefixes are returned in the Prefix child element. + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Contains the delimiter you specified in the request. If you don't specify + // a delimiter in your request, this element is absent from the response. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + // + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: + // + // Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. + EncodingType *string `type:"string" enum:"EncodingType"` + + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` + + // The key at or after which the listing began. + KeyMarker *string `type:"string"` + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int64 `type:"integer"` + + // When a list is truncated, this element specifies the value that should be + // used for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When a list is truncated, this element specifies the value that should be + // used for the upload-id-marker request parameter in a subsequent request. + // + // This functionality is not supported for directory buckets. + NextUploadIdMarker *string `type:"string"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. + Prefix *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. + // + // This functionality is not supported for directory buckets. + UploadIdMarker *string `type:"string"` + + // Container for elements related to a particular multipart upload. A response + // can contain zero or more Upload elements. + Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListMultipartUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListMultipartUploadsOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput { + s.Bucket = &v + return s +} + +func (s *ListMultipartUploadsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { + s.CommonPrefixes = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput { + s.MaxUploads = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextUploadIdMarker sets the NextUploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.NextUploadIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput { + s.Prefix = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListMultipartUploadsOutput) SetRequestCharged(v string) *ListMultipartUploadsOutput { + s.RequestCharged = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.UploadIdMarker = &v + return s +} + +// SetUploads sets the Uploads field's value. +func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput { + s.Uploads = v + return s +} + +type ListObjectVersionsInput struct { + _ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"` + + // The bucket name that contains the objects. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character that you specify to group keys. 
All keys that
+	// contain the same string between the prefix and the first occurrence of the
+	// delimiter are grouped under a single result element in CommonPrefixes. These
+	// groups are counted as one result against the max-keys limitation. These keys
+	// are not returned elsewhere in the response.
+	Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+	// Requests Amazon S3 to encode the object keys in the response and specifies
+	// the encoding method to use. An object key can contain any Unicode character;
+	// however, the XML 1.0 parser cannot parse some characters, such as characters
+	// with an ASCII value from 0 to 10. For characters that are not supported in
+	// XML 1.0, you can add this parameter to request that Amazon S3 encode the
+	// keys in the response.
+	EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+	// The account ID of the expected bucket owner. If the account ID that you provide
+	// does not match the actual owner of the bucket, the request fails with the
+	// HTTP status code 403 Forbidden (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Specifies the key to start with when listing objects in a bucket.
+	KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+	// Sets the maximum number of keys returned in the response. By default, the
+	// action returns up to 1,000 key names. The response might contain fewer keys
+	// but will never contain more. If additional keys satisfy the search criteria,
+	// but were not returned because max-keys was exceeded, the response contains
+	// IsTruncated set to true. To return the additional keys, see key-marker and
+	// version-id-marker.
+	MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+	// Specifies the optional fields that you want returned in the response. Fields
+	// that you do not specify are not returned.
+	OptionalObjectAttributes []*string `location:"header" locationName:"x-amz-optional-object-attributes" type:"list" enum:"OptionalObjectAttributes"`
+
+	// Use this parameter to select only those keys that begin with the specified
+	// prefix. You can use prefixes to separate a bucket into different groupings
+	// of keys. (You can think of using prefix to make groups in the same way that
+	// you'd use a folder in a file system.) You can use prefix with delimiter to
+	// roll up numerous objects into a single result under CommonPrefixes.
+	Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. If either
+	// the source or destination S3 bucket has Requester Pays enabled, the requester
+	// will pay for corresponding charges to copy the object. For information about
+	// downloading objects from Requester Pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 User Guide.
+	//
+	// This functionality is not supported for directory buckets.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the object version you want to start listing from.
+ VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput { + s.Bucket = &v + return s +} + +func (s *ListObjectVersionsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListObjectVersionsInput) SetExpectedBucketOwner(v string) *ListObjectVersionsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput { + s.MaxKeys = &v + return s +} + +// SetOptionalObjectAttributes sets the OptionalObjectAttributes field's value. +func (s *ListObjectVersionsInput) SetOptionalObjectAttributes(v []*string) *ListObjectVersionsInput { + s.OptionalObjectAttributes = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectVersionsInput) SetRequestPayer(v string) *ListObjectVersionsInput { + s.RequestPayer = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. 
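+//
+// As an illustrative sketch only (not part of the generated API): KeyMarker
+// and VersionIdMarker resume a truncated version listing from the
+// NextKeyMarker/NextVersionIdMarker values described on the output type. The
+// SDK also generates a ListObjectVersionsPages helper elsewhere in this
+// package that drives the same loop; a manual version, with svc an assumed
+// *s3.S3 client:
+//
+//	in := &s3.ListObjectVersionsInput{Bucket: aws.String("example-bucket")} // placeholder bucket
+//	for {
+//		out, err := svc.ListObjectVersions(in)
+//		if err != nil {
+//			return err
+//		}
+//		// ... inspect out.Versions and out.DeleteMarkers ...
+//		if out.IsTruncated == nil || !*out.IsTruncated {
+//			break
+//		}
+//		in.KeyMarker, in.VersionIdMarker = out.NextKeyMarker, out.NextVersionIdMarker
+//	}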
+func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput { + s.VersionIdMarker = &v + return s +} + +func (s *ListObjectVersionsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectVersionsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectVersionsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListObjectVersionsOutput struct { + _ struct{} `type:"structure"` + + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Container for an object that is a delete marker. + DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + + // The delimiter grouping the included keys. A delimiter is a character that + // you specify to group keys. All keys that contain the same string between + // the prefix and the first occurrence of the delimiter are grouped under a + // single result element in CommonPrefixes. These groups are counted as one + // result against the max-keys limitation. These keys are not returned elsewhere + // in the response. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: + // + // KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. If your results were truncated, you can make + // a follow-up paginated request by using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the + // rest of the results. + IsTruncated *bool `type:"boolean"` + + // Marks the last key returned in a truncated response. + KeyMarker *string `type:"string"` + + // Specifies the maximum number of objects to return. + MaxKeys *int64 `type:"integer"` + + // The bucket name. + Name *string `type:"string"` + + // When the number of responses exceeds the value of MaxKeys, NextKeyMarker + // specifies the first key not returned that satisfies the search criteria. + // Use this value for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker + // specifies the first object version not returned that satisfies the search + // criteria. Use this value for the version-id-marker request parameter in a + // subsequent request. + NextVersionIdMarker *string `type:"string"` + + // Selects objects that start with the value supplied by this parameter. 
+ Prefix *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Marks the last version of the key returned in a truncated response. + VersionIdMarker *string `type:"string"` + + // Container for version information. + Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectVersionsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput { + s.CommonPrefixes = v + return s +} + +// SetDeleteMarkers sets the DeleteMarkers field's value. +func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput { + s.DeleteMarkers = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput { + s.Name = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextVersionIdMarker sets the NextVersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput { + s.NextVersionIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput { + s.Prefix = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListObjectVersionsOutput) SetRequestCharged(v string) *ListObjectVersionsOutput { + s.RequestCharged = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. 
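+//
+// As an illustrative sketch only (not part of the generated API): when the
+// request sets EncodingType to "url", the key-bearing fields described above
+// come back URL-encoded and the caller is responsible for decoding them, for
+// example with the standard library (version here is an assumed
+// *s3.ObjectVersion taken from Versions):
+//
+//	key, err := url.QueryUnescape(aws.StringValue(version.Key))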
+func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput { + s.VersionIdMarker = &v + return s +} + +// SetVersions sets the Versions field's value. +func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput { + s.Versions = v + return s +} + +type ListObjectsInput struct { + _ struct{} `locationName:"ListObjectsRequest" type:"structure"` + + // The name of the bucket containing the objects. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character that you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key can contain any Unicode character; + // however, the XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The account ID of the expected bucket owner. 
If the account ID that you provide
+	// does not match the actual owner of the bucket, the request fails with the
+	// HTTP status code 403 Forbidden (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Marker is where you want Amazon S3 to start listing from. Amazon S3 starts
+	// listing after this specified key. Marker can be any key in the bucket.
+	Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+	// Sets the maximum number of keys returned in the response. By default, the
+	// action returns up to 1,000 key names. The response might contain fewer keys
+	// but will never contain more.
+	MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+	// Specifies the optional fields that you want returned in the response. Fields
+	// that you do not specify are not returned.
+	OptionalObjectAttributes []*string `location:"header" locationName:"x-amz-optional-object-attributes" type:"list" enum:"OptionalObjectAttributes"`
+
+	// Limits the response to keys that begin with the specified prefix.
+	Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+	// Confirms that the requester knows that they will be charged for the list
+	// objects request. Bucket owners need not specify this parameter in their
+	// requests.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectsInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *ListObjectsInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput {
+	s.Delimiter = &v
+	return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput {
+	s.EncodingType = &v
+	return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *ListObjectsInput) SetExpectedBucketOwner(v string) *ListObjectsInput {
+	s.ExpectedBucketOwner = &v
+	return s
+}
+
+// SetMarker sets the Marker field's value.
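+//
+// As an illustrative sketch only (not part of the generated API): Marker
+// resumes a ListObjects (v1) listing. Note that the output's NextMarker is
+// only returned when a delimiter is specified; otherwise the last returned
+// Key serves as the next marker, as described on ListObjectsOutput.NextMarker
+// below. With svc an assumed *s3.S3 client:
+//
+//	in := &s3.ListObjectsInput{Bucket: aws.String("example-bucket")} // placeholder bucket
+//	for {
+//		out, err := svc.ListObjects(in)
+//		if err != nil {
+//			return err
+//		}
+//		// ... inspect out.Contents ...
+//		if out.IsTruncated == nil || !*out.IsTruncated {
+//			break
+//		}
+//		if out.NextMarker != nil {
+//			in.Marker = out.NextMarker
+//		} else if n := len(out.Contents); n > 0 {
+//			in.Marker = out.Contents[n-1].Key
+//		}
+//	}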
+func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput { + s.MaxKeys = &v + return s +} + +// SetOptionalObjectAttributes sets the OptionalObjectAttributes field's value. +func (s *ListObjectsInput) SetOptionalObjectAttributes(v []*string) *ListObjectsInput { + s.OptionalObjectAttributes = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { + s.RequestPayer = &v + return s +} + +func (s *ListObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListObjectsOutput struct { + _ struct{} `type:"structure"` + + // All of the keys (up to 1,000) rolled up in a common prefix count as a single + // return when calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by the delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/), as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Metadata about each object returned. + Contents []*Object `type:"list" flattened:"true"` + + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. Each rolled-up result counts as only one return against + // the MaxKeys value. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. If + // using url, non-ASCII characters used in an object's key name will be URL + // encoded. For example, the object test_file(3).png will appear as test_file%283%29.png. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. + IsTruncated *bool `type:"boolean"` + + // Indicates where in the bucket listing begins. Marker is included in the response + // if it was sent with the request. 
+ Marker *string `type:"string"` + + // The maximum number of keys returned in the response body. + MaxKeys *int64 `type:"integer"` + + // The bucket name. + Name *string `type:"string"` + + // When the response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as the marker parameter + // in the subsequent request to get the next set of objects. Amazon S3 lists + // objects in alphabetical order. + // + // This element is returned only if you have the delimiter request parameter + // specified. If the response does not include the NextMarker element and it + // is truncated, you can use the value of the last Key element in the response + // as the marker parameter in the subsequent request to get the next set of + // object keys. + NextMarker *string `type:"string"` + + // Keys that begin with the indicated prefix. + Prefix *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput { + s.CommonPrefixes = v + return s +} + +// SetContents sets the Contents field's value. +func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput { + s.Contents = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput { + s.Name = &v + return s +} + +// SetNextMarker sets the NextMarker field's value. +func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput { + s.NextMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { + s.Prefix = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. 
+func (s *ListObjectsOutput) SetRequestCharged(v string) *ListObjectsOutput { + s.RequestCharged = &v + return s +} + +type ListObjectsV2Input struct { + _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` + + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // ContinuationToken indicates to Amazon S3 that the list is being continued + // on this bucket with a token. ContinuationToken is obfuscated and is not a + // real key. You can use this ContinuationToken for pagination of the list results. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // A delimiter is a character that you use to group keys. + // + // * Directory buckets - For directory buckets, / is the only supported delimiter. + // + // * Directory buckets - When you query ListObjectsV2 with a delimiter during + // in-progress multipart uploads, the CommonPrefixes response parameter contains + // the prefixes that are associated with the in-progress multipart uploads. + // For more information about multipart uploads, see Multipart Upload Overview + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in + // the Amazon S3 User Guide. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. 
If
+ // using url, non-ASCII characters used in an object's key name will be URL
+ // encoded. For example, the object test_file(3).png will appear as test_file%283%29.png.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The owner field is not present in ListObjectsV2 by default. If you want to
+ // return the owner field with each key in the result, then set the FetchOwner
+ // field to true.
+ //
+ // Directory buckets - For directory buckets, the bucket owner is returned as
+ // the object owner for all objects.
+ FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"`
+
+ // Sets the maximum number of keys returned in the response. By default, the
+ // action returns up to 1,000 key names. The response might contain fewer keys
+ // but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Specifies the optional fields that you want returned in the response. Fields
+ // that you do not specify are not returned.
+ //
+ // This functionality is not supported for directory buckets.
+ OptionalObjectAttributes []*string `location:"header" locationName:"x-amz-optional-object-attributes" type:"list" enum:"OptionalObjectAttributes"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ //
+ // Directory buckets - For directory buckets, only prefixes that end in a delimiter
+ // (/) are supported.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Confirms that the requester knows that they will be charged for the
+ // list objects request in V2 style. Bucket owners need not specify this parameter
+ // in their requests.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+ // listing after this specified key. StartAfter can be any key in the bucket.
+ //
+ // This functionality is not supported for directory buckets.
+ StartAfter *string `location:"querystring" locationName:"start-after" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectsV2Input) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListObjectsV2Input) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
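+//
+// Illustrative only, not part of the generated API: a minimal caller-side
+// V2 paging sketch using ContinuationToken, assuming an *s3.S3 client
+// named svc and an assumed bucket name:
+//
+//	in := &s3.ListObjectsV2Input{Bucket: aws.String("example-bucket")}
+//	for {
+//		out, err := svc.ListObjectsV2(in)
+//		if err != nil {
+//			break // handle the error in real code
+//		}
+//		// ... consume out.Contents ...
+//		if !aws.BoolValue(out.IsTruncated) {
+//			break
+//		}
+//		in.ContinuationToken = out.NextContinuationToken
+//	}
+//
+// The SDK's ListObjectsV2Pages helper wraps this same loop.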
+func (s *ListObjectsV2Input) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input { + s.Bucket = &v + return s +} + +func (s *ListObjectsV2Input) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input { + s.ContinuationToken = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListObjectsV2Input) SetExpectedBucketOwner(v string) *ListObjectsV2Input { + s.ExpectedBucketOwner = &v + return s +} + +// SetFetchOwner sets the FetchOwner field's value. +func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input { + s.FetchOwner = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input { + s.MaxKeys = &v + return s +} + +// SetOptionalObjectAttributes sets the OptionalObjectAttributes field's value. +func (s *ListObjectsV2Input) SetOptionalObjectAttributes(v []*string) *ListObjectsV2Input { + s.OptionalObjectAttributes = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input { + s.RequestPayer = &v + return s +} + +// SetStartAfter sets the StartAfter field's value. +func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { + s.StartAfter = &v + return s +} + +func (s *ListObjectsV2Input) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsV2Input) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectsV2Input) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListObjectsV2Output struct { + _ struct{} `type:"structure"` + + // All of the keys (up to 1,000) that share the same prefix are grouped together. + // When counting the total numbers of returns by this API operation, this group + // of keys is considered as one item. 
+ //
+ // A response can contain CommonPrefixes only if you specify a delimiter.
+ //
+ // CommonPrefixes contains all (if there are any) keys between Prefix and the
+ // next occurrence of the string specified by a delimiter.
+ //
+ // CommonPrefixes lists keys that act like subdirectories in the directory specified
+ // by Prefix.
+ //
+ // For example, if the prefix is notes/ and the delimiter is a slash (/) as
+ // in notes/summer/july, the common prefix is notes/summer/. All of the keys
+ // that roll up into a common prefix count as a single return when calculating
+ // the number of returns.
+ //
+ // * Directory buckets - For directory buckets, only prefixes that end in
+ // a delimiter (/) are supported.
+ //
+ // * Directory buckets - When you query ListObjectsV2 with a delimiter during
+ // in-progress multipart uploads, the CommonPrefixes response parameter contains
+ // the prefixes that are associated with the in-progress multipart uploads.
+ // For more information about multipart uploads, see Multipart Upload Overview
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in
+ // the Amazon S3 User Guide.
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ // Metadata about each object returned.
+ Contents []*Object `type:"list" flattened:"true"`
+
+ // If ContinuationToken was sent with the request, it is included in the response.
+ // You can use the returned ContinuationToken for pagination of the list results.
+ ContinuationToken *string `type:"string"`
+
+ // Causes keys that contain the same string between the prefix and the first
+ // occurrence of the delimiter to be rolled up into a single result element
+ // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere
+ // in the response. Each rolled-up result counts as only one return against
+ // the MaxKeys value.
+ //
+ // Directory buckets - For directory buckets, / is the only supported delimiter.
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object key names in the XML response.
+ //
+ // If you specify the encoding-type request parameter, Amazon S3 includes this
+ // element in the response, and returns encoded key name values in the following
+ // response elements:
+ //
+ // Delimiter, Prefix, Key, and StartAfter.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // Set to false if all of the results were returned. Set to true if more keys
+ // are available to return. If the number of results exceeds that specified
+ // by MaxKeys, all of the results might not be returned.
+ IsTruncated *bool `type:"boolean"`
+
+ // KeyCount is the number of keys returned with this request. KeyCount will
+ // always be less than or equal to the MaxKeys field. For example, if you ask
+ // for 50 keys, your result will include 50 keys or fewer.
+ KeyCount *int64 `type:"integer"`
+
+ // Sets the maximum number of keys returned in the response. By default, the
+ // action returns up to 1,000 key names. The response might contain fewer keys
+ // but will never contain more.
+ MaxKeys *int64 `type:"integer"`
+
+ // The bucket name.
+ Name *string `type:"string"`
+
+ // NextContinuationToken is sent when IsTruncated is true, which means there
+ // are more keys in the bucket that can be listed. The next list requests to
+ // Amazon S3 can be continued with this NextContinuationToken.
NextContinuationToken + // is obfuscated and is not a real key + NextContinuationToken *string `type:"string"` + + // Keys that begin with the indicated prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. + Prefix *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If StartAfter was sent with the request, it is included in the response. + // + // This functionality is not supported for directory buckets. + StartAfter *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectsV2Output) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectsV2Output) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output { + s.CommonPrefixes = v + return s +} + +// SetContents sets the Contents field's value. +func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output { + s.Contents = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output { + s.ContinuationToken = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output { + s.IsTruncated = &v + return s +} + +// SetKeyCount sets the KeyCount field's value. +func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output { + s.KeyCount = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output { + s.Name = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output { + s.NextContinuationToken = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output { + s.Prefix = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListObjectsV2Output) SetRequestCharged(v string) *ListObjectsV2Output { + s.RequestCharged = &v + return s +} + +// SetStartAfter sets the StartAfter field's value. 
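+//
+// Illustrative only: on the input side, the corresponding StartAfter field
+// gives a token-free way to begin listing after a known key. A hedged
+// caller-side sketch (client, bucket, and key are assumptions):
+//
+//	out, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
+//		Bucket:     aws.String("example-bucket"),
+//		StartAfter: aws.String("logs/2024-01-01.txt"),
+//	})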
+func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { + s.StartAfter = &v + return s +} + +type ListPartsInput struct { + _ struct{} `locationName:"ListPartsRequest" type:"structure"` + + // The name of the bucket to which the parts are being uploaded. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Sets the maximum number of parts to return. + MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is needed only when the object was created using a checksum algorithm. + // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // The server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ListPartsInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // The MD5 server-side encryption (SSE) customer managed key. This parameter + // is needed only when the object was created using a checksum algorithm. For + // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose parts are being listed. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListPartsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListPartsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
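+//
+// Illustrative only, not part of the generated API: a hedged caller-side
+// sketch of listing the parts of an in-progress multipart upload, assuming
+// an *s3.S3 client named svc and an uploadID string obtained earlier from
+// CreateMultipartUpload:
+//
+//	out, err := svc.ListParts(&s3.ListPartsInput{
+//		Bucket:   aws.String("example-bucket"), // assumed bucket name
+//		Key:      aws.String("big/object"),     // assumed key
+//		UploadId: aws.String(uploadID),
+//		MaxParts: aws.Int64(500),
+//	})
+//
+// When out.IsTruncated is true, resend the request with PartNumberMarker
+// set to out.NextPartNumberMarker; the SDK's ListPartsPages helper wraps
+// this loop.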
+func (s *ListPartsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListPartsInput) SetBucket(v string) *ListPartsInput { + s.Bucket = &v + return s +} + +func (s *ListPartsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListPartsInput) SetExpectedBucketOwner(v string) *ListPartsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ListPartsInput) SetKey(v string) *ListPartsInput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput { + s.MaxParts = &v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput { + s.PartNumberMarker = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *ListPartsInput) SetSSECustomerAlgorithm(v string) *ListPartsInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *ListPartsInput) SetSSECustomerKey(v string) *ListPartsInput { + s.SSECustomerKey = &v + return s +} + +func (s *ListPartsInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *ListPartsInput) SetSSECustomerKeyMD5(v string) *ListPartsInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { + s.UploadId = &v + return s +} + +func (s *ListPartsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListPartsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s ListPartsInput) updateArnableField(v string) (interface{}, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ s.Bucket = aws.String(v)
+ return &s, nil
+}
+
+type ListPartsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If the bucket has a lifecycle rule configured with an action to abort incomplete
+ // multipart uploads and the prefix in the lifecycle rule matches the object
+ // name in the request, then the response includes this header indicating when
+ // the initiated multipart upload will become eligible for the abort operation.
+ // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+ // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+ //
+ // The response will also include the x-amz-abort-rule-id header that will provide
+ // the ID of the lifecycle configuration rule that defines this action.
+ //
+ // This functionality is not supported for directory buckets.
+ AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
+
+ // This header is returned along with the x-amz-abort-date header. It identifies
+ // the applicable lifecycle configuration rule that defines the action to abort
+ // incomplete multipart uploads.
+ //
+ // This functionality is not supported for directory buckets.
+ AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
+
+ // The name of the bucket to which the multipart upload was initiated. Does
+ // not return the access point ARN or access point alias if used.
+ Bucket *string `type:"string"`
+
+ // The algorithm that was used to create a checksum of the object.
+ ChecksumAlgorithm *string `type:"string" enum:"ChecksumAlgorithm"`
+
+ // Container element that identifies who initiated the multipart upload. If
+ // the initiator is an Amazon Web Services account, this element provides the
+ // same information as the Owner element. If the initiator is an IAM User, this
+ // element provides the user ARN and display name.
+ Initiator *Initiator `type:"structure"`
+
+ // Indicates whether the returned list of parts is truncated. A true value indicates
+ // that the list was truncated. A list can be truncated if the number of parts
+ // exceeds the limit returned in the MaxParts element.
+ IsTruncated *bool `type:"boolean"`
+
+ // Object key for which the multipart upload was initiated.
+ Key *string `min:"1" type:"string"`
+
+ // Maximum number of parts that were allowed in the response.
+ MaxParts *int64 `type:"integer"`
+
+ // When a list is truncated, this element specifies the last part in the list,
+ // as well as the value to use for the part-number-marker request parameter
+ // in a subsequent request.
+ NextPartNumberMarker *int64 `type:"integer"`
+
+ // Container element that identifies the object owner, after the object is created.
+ // If multipart upload is initiated by an IAM user, this element provides the
+ // parent account ID and display name.
+ //
+ // Directory buckets - The bucket owner is returned as the object owner for
+ // all the parts.
+ Owner *Owner `type:"structure"`
+
+ // Specifies the part after which listing should begin. Only parts with higher
+ // part numbers will be listed.
+ PartNumberMarker *int64 `type:"integer"`
+
+ // Container for elements related to a particular part. A response can contain
+ // zero or more Part elements.
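+ //
+ // For illustration (caller-side sketch, the out variable is assumed): each
+ // Part carries its PartNumber, ETag, and Size, which is what a later
+ // CompleteMultipartUpload call ultimately needs:
+ //
+ //	for _, p := range out.Parts {
+ //		fmt.Println(aws.Int64Value(p.PartNumber), aws.StringValue(p.ETag))
+ //	}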
+ Parts []*Part `locationName:"Part" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // The class of storage used to store the uploaded object. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListPartsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListPartsOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. +func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { + s.Bucket = &v + return s +} + +func (s *ListPartsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *ListPartsOutput) SetChecksumAlgorithm(v string) *ListPartsOutput { + s.ChecksumAlgorithm = &v + return s +} + +// SetInitiator sets the Initiator field's value. +func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { + s.Initiator = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput { + s.IsTruncated = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput { + s.MaxParts = &v + return s +} + +// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. +func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput { + s.NextPartNumberMarker = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput { + s.Owner = v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput { + s.PartNumberMarker = &v + return s +} + +// SetParts sets the Parts field's value. +func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput { + s.Parts = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. 
+func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput { + s.RequestCharged = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { + s.UploadId = &v + return s +} + +// Describes an Amazon S3 location that will receive the results of the restore +// request. +type Location struct { + _ struct{} `type:"structure"` + + // A list of grants that control access to the staged results. + AccessControlList []*Grant `locationNameList:"Grant" type:"list"` + + // The name of the bucket where the restore results will be placed. + // + // BucketName is a required field + BucketName *string `type:"string" required:"true"` + + // The canned ACL to apply to the restore results. + CannedACL *string `type:"string" enum:"ObjectCannedACL"` + + // Contains the type of server-side encryption used. + Encryption *Encryption `type:"structure"` + + // The prefix that is prepended to the restore results for this request. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // The class of storage used to store the restore results. + StorageClass *string `type:"string" enum:"StorageClass"` + + // The tag-set that is applied to the restore results. + Tagging *Tagging `type:"structure"` + + // A list of metadata to store with the restore results in S3. + UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Location) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Location) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Location"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.AccessControlList != nil { + for i, v := range s.AccessControlList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessControlList", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessControlList sets the AccessControlList field's value. +func (s *Location) SetAccessControlList(v []*Grant) *Location { + s.AccessControlList = v + return s +} + +// SetBucketName sets the BucketName field's value. 
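+//
+// Illustrative only, not part of the generated API: a hedged sketch of a
+// Location as it is typically embedded in a RestoreRequest's OutputLocation
+// (bucket and prefix names are assumptions):
+//
+//	loc := &s3.Location{
+//		BucketName: aws.String("restore-results-bucket"),
+//		Prefix:     aws.String("restored/"),
+//	}
+//	req := &s3.RestoreRequest{
+//		OutputLocation: &s3.OutputLocation{S3: loc},
+//	}
+//
+// BucketName and Prefix are the only required fields; the ACL, encryption,
+// tagging, and metadata members are optional.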
+func (s *Location) SetBucketName(v string) *Location { + s.BucketName = &v + return s +} + +// SetCannedACL sets the CannedACL field's value. +func (s *Location) SetCannedACL(v string) *Location { + s.CannedACL = &v + return s +} + +// SetEncryption sets the Encryption field's value. +func (s *Location) SetEncryption(v *Encryption) *Location { + s.Encryption = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Location) SetPrefix(v string) *Location { + s.Prefix = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Location) SetStorageClass(v string) *Location { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *Location) SetTagging(v *Tagging) *Location { + s.Tagging = v + return s +} + +// SetUserMetadata sets the UserMetadata field's value. +func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { + s.UserMetadata = v + return s +} + +// Specifies the location where the bucket will be created. +// +// For directory buckets, the location type is Availability Zone. For more information +// about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. +// +// This functionality is only supported by directory buckets. +type LocationInfo struct { + _ struct{} `type:"structure"` + + // The name of the location where the bucket will be created. + // + // For directory buckets, the name of the location is the AZ ID of the Availability + // Zone where the bucket will be created. An example AZ ID value is usw2-az1. + Name *string `type:"string"` + + // The type of location where the bucket will be created. + Type *string `type:"string" enum:"LocationType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocationInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocationInfo) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *LocationInfo) SetName(v string) *LocationInfo { + s.Name = &v + return s +} + +// SetType sets the Type field's value. +func (s *LocationInfo) SetType(v string) *LocationInfo { + s.Type = &v + return s +} + +// Describes where logs are stored and the prefix that Amazon S3 assigns to +// all log object keys for a bucket. For more information, see PUT Bucket logging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) +// in the Amazon S3 API Reference. +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case, you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. 
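+ //
+ // A hedged caller-side sketch of the shared-target-bucket setup described
+ // above (bucket and prefix names are assumptions); the struct is delivered
+ // to S3 inside a PutBucketLogging request:
+ //
+ //	logging := &s3.LoggingEnabled{
+ //		TargetBucket: aws.String("central-log-bucket"),
+ //		TargetPrefix: aws.String("source-bucket-a/"),
+ //	}
+ //	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
+ //		Bucket:              aws.String("source-bucket-a"),
+ //		BucketLoggingStatus: &s3.BucketLoggingStatus{LoggingEnabled: logging},
+ //	})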
+ // + // TargetBucket is a required field + TargetBucket *string `type:"string" required:"true"` + + // Container for granting information. + // + // Buckets that use the bucket owner enforced setting for Object Ownership don't + // support target grants. For more information, see Permissions for server access + // log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) + // in the Amazon S3 User Guide. + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // Amazon S3 key format for log objects. + TargetObjectKeyFormat *TargetObjectKeyFormat `type:"structure"` + + // A prefix for all log object keys. If you store log files from multiple Amazon + // S3 buckets in a single bucket, you can use a prefix to distinguish which + // log files came from which bucket. + // + // TargetPrefix is a required field + TargetPrefix *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LoggingEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LoggingEnabled) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetBucket == nil { + invalidParams.Add(request.NewErrParamRequired("TargetBucket")) + } + if s.TargetPrefix == nil { + invalidParams.Add(request.NewErrParamRequired("TargetPrefix")) + } + if s.TargetGrants != nil { + for i, v := range s.TargetGrants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetBucket sets the TargetBucket field's value. +func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { + s.TargetBucket = &v + return s +} + +// SetTargetGrants sets the TargetGrants field's value. +func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { + s.TargetGrants = v + return s +} + +// SetTargetObjectKeyFormat sets the TargetObjectKeyFormat field's value. +func (s *LoggingEnabled) SetTargetObjectKeyFormat(v *TargetObjectKeyFormat) *LoggingEnabled { + s.TargetObjectKeyFormat = v + return s +} + +// SetTargetPrefix sets the TargetPrefix field's value. +func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { + s.TargetPrefix = &v + return s +} + +// A metadata key-value pair to store with an object. +type MetadataEntry struct { + _ struct{} `type:"structure"` + + // Name of the object. + Name *string `type:"string"` + + // Value of the object. + Value *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetadataEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetadataEntry) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *MetadataEntry) SetName(v string) *MetadataEntry { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *MetadataEntry) SetValue(v string) *MetadataEntry { + s.Value = &v + return s +} + +// A container specifying replication metrics-related settings enabling replication +// metrics and events. +type Metrics struct { + _ struct{} `type:"structure"` + + // A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold + // event. + EventThreshold *ReplicationTimeValue `type:"structure"` + + // Specifies whether the replication metrics are enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"MetricsStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Metrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Metrics) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Metrics) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Metrics"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventThreshold sets the EventThreshold field's value. +func (s *Metrics) SetEventThreshold(v *ReplicationTimeValue) *Metrics { + s.EventThreshold = v + return s +} + +// SetStatus sets the Status field's value. +func (s *Metrics) SetStatus(v string) *Metrics { + s.Status = &v + return s +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates, and an object +// must match all of the predicates in order for the filter to apply. +type MetricsAndOperator struct { + _ struct{} `type:"structure"` + + // The access point ARN used when evaluating an AND predicate. + AccessPointArn *string `type:"string"` + + // The prefix used when evaluating an AND predicate. + Prefix *string `type:"string"` + + // The list of tags used when evaluating an AND predicate. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
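+//
+// Illustrative only: the "at least two predicates" rule in practice, as a
+// hedged caller-side sketch combining a prefix with a tag (values are
+// assumptions):
+//
+//	and := &s3.MetricsAndOperator{
+//		Prefix: aws.String("photos/"),
+//		Tags: []*s3.Tag{
+//			{Key: aws.String("class"), Value: aws.String("hot")},
+//		},
+//	}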
+func (s MetricsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessPointArn sets the AccessPointArn field's value. +func (s *MetricsAndOperator) SetAccessPointArn(v string) *MetricsAndOperator { + s.AccessPointArn = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { + s.Tags = v + return s +} + +// Specifies a metrics configuration for the CloudWatch request metrics (specified +// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating +// an existing metrics configuration, note that this is a full replacement of +// the existing metrics configuration. If you don't include the elements you +// want to keep, they are erased. For more information, see PutBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html). +type MetricsConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a metrics configuration filter. The metrics configuration will + // only include objects that meet the filter's criteria. A filter must be a + // prefix, an object tag, an access point ARN, or a conjunction (MetricsAndOperator). + Filter *MetricsFilter `type:"structure"` + + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. + // + // Id is a required field + Id *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
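+//
+// Illustrative only, not part of the generated API: because a PUT fully
+// replaces the existing metrics configuration, callers typically resend
+// every element they want to keep. A hedged sketch, assuming an *s3.S3
+// client named svc and an assumed bucket and configuration ID:
+//
+//	_, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//		Id:     aws.String("EntireBucket"),
+//		MetricsConfiguration: &s3.MetricsConfiguration{
+//			Id: aws.String("EntireBucket"),
+//		},
+//	})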
+func (s *MetricsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { + s.Id = &v + return s +} + +// Specifies a metrics configuration filter. The metrics configuration only +// includes objects that meet the filter's criteria. A filter must be a prefix, +// an object tag, an access point ARN, or a conjunction (MetricsAndOperator). +// For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html). +type MetricsFilter struct { + _ struct{} `type:"structure"` + + // The access point ARN used when evaluating a metrics filter. + AccessPointArn *string `type:"string"` + + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *MetricsAndOperator `type:"structure"` + + // The prefix used when evaluating a metrics filter. + Prefix *string `type:"string"` + + // The tag used when evaluating a metrics filter. + Tag *Tag `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessPointArn sets the AccessPointArn field's value. +func (s *MetricsFilter) SetAccessPointArn(v string) *MetricsFilter { + s.AccessPointArn = &v + return s +} + +// SetAnd sets the And field's value. +func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. 
+func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { + s.Tag = v + return s +} + +// Container for the MultipartUpload for the Amazon S3 object. +type MultipartUpload struct { + _ struct{} `type:"structure"` + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm *string `type:"string" enum:"ChecksumAlgorithm"` + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Key of the object for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Specifies the owner of the object that is part of the multipart upload. + // + // Directory buckets - The bucket owner is returned as the object owner for + // all the objects. + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MultipartUpload) GoString() string { + return s.String() +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *MultipartUpload) SetChecksumAlgorithm(v string) *MultipartUpload { + s.ChecksumAlgorithm = &v + return s +} + +// SetInitiated sets the Initiated field's value. +func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload { + s.Initiated = &v + return s +} + +// SetInitiator sets the Initiator field's value. +func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload { + s.Initiator = v + return s +} + +// SetKey sets the Key field's value. +func (s *MultipartUpload) SetKey(v string) *MultipartUpload { + s.Key = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload { + s.Owner = v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { + s.UploadId = &v + return s +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. +type NoncurrentVersionExpiration struct { + _ struct{} `type:"structure"` + + // Specifies how many noncurrent versions Amazon S3 will retain. You can specify + // up to 100 noncurrent versions to retain. 
Amazon S3 will permanently delete + // any additional noncurrent versions beyond the specified number to retain. + // For more information about noncurrent versions, see Lifecycle configuration + // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) + // in the Amazon S3 User Guide. + NewerNoncurrentVersions *int64 `type:"integer"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. The value must be a non-zero positive integer. + // For information about the noncurrent days calculations, see How Amazon S3 + // Calculates When an Object Became Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon S3 User Guide. + NoncurrentDays *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NoncurrentVersionExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NoncurrentVersionExpiration) GoString() string { + return s.String() +} + +// SetNewerNoncurrentVersions sets the NewerNoncurrentVersions field's value. +func (s *NoncurrentVersionExpiration) SetNewerNoncurrentVersions(v int64) *NoncurrentVersionExpiration { + s.NewerNoncurrentVersions = &v + return s +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration { + s.NoncurrentDays = &v + return s +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, +// GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled +// (or versioning is suspended), you can set this action to request that Amazon +// S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, +// INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at +// a specific period in the object's lifetime. +type NoncurrentVersionTransition struct { + _ struct{} `type:"structure"` + + // Specifies how many noncurrent versions Amazon S3 will retain in the same + // storage class before transitioning objects. You can specify up to 100 noncurrent + // versions to retain. Amazon S3 will transition any additional noncurrent versions + // beyond the specified number to retain. For more information about noncurrent + // versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) + // in the Amazon S3 User Guide. + NewerNoncurrentVersions *int64 `type:"integer"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon S3 User Guide. 
+ NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NoncurrentVersionTransition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NoncurrentVersionTransition) GoString() string { + return s.String() +} + +// SetNewerNoncurrentVersions sets the NewerNoncurrentVersions field's value. +func (s *NoncurrentVersionTransition) SetNewerNoncurrentVersions(v int64) *NoncurrentVersionTransition { + s.NewerNoncurrentVersions = &v + return s +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition { + s.NoncurrentDays = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition { + s.StorageClass = &v + return s +} + +// A container for specifying the notification configuration of the bucket. +// If this element is empty, notifications are turned off for the bucket. +type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + // Enables delivery of events to Amazon EventBridge. + EventBridgeConfiguration *EventBridgeConfiguration `type:"structure"` + + // Describes the Lambda functions to invoke and the events for which to invoke + // them. + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + + // The Amazon Simple Queue Service queues to publish messages to and the events + // for which to publish messages. + QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + + // The topic to which notifications are sent and the events for which notifications + // are generated. + TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
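+//
+// A minimal illustrative sketch (not part of the generated SDK): wiring a
+// single SQS queue configuration and validating it; the queue ARN is
+// hypothetical.
+//
+//	nc := (&NotificationConfiguration{}).SetQueueConfigurations([]*QueueConfiguration{
+//		(&QueueConfiguration{}).
+//			SetQueueArn("arn:aws:sqs:us-east-1:123456789012:my-queue").
+//			SetEvents([]*string{aws.String("s3:ObjectCreated:*")}),
+//	})
+//	if err := nc.Validate(); err != nil {
+//		// handle invalid parameters
+//	}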
+func (s *NotificationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} + if s.LambdaFunctionConfigurations != nil { + for i, v := range s.LambdaFunctionConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.QueueConfigurations != nil { + for i, v := range s.QueueConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TopicConfigurations != nil { + for i, v := range s.TopicConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventBridgeConfiguration sets the EventBridgeConfiguration field's value. +func (s *NotificationConfiguration) SetEventBridgeConfiguration(v *EventBridgeConfiguration) *NotificationConfiguration { + s.EventBridgeConfiguration = v + return s +} + +// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value. +func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration { + s.LambdaFunctionConfigurations = v + return s +} + +// SetQueueConfigurations sets the QueueConfigurations field's value. +func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration { + s.QueueConfigurations = v + return s +} + +// SetTopicConfigurations sets the TopicConfigurations field's value. +func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration { + s.TopicConfigurations = v + return s +} + +type NotificationConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Container for specifying the Lambda notification configuration. + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + // This data type is deprecated. This data type specifies the configuration + // for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue + // when Amazon S3 detects specified events. + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + // This data type is deprecated. A container for specifying the configuration + // for publication of messages to an Amazon Simple Notification Service (Amazon + // SNS) topic when Amazon S3 detects specified events. + TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotificationConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s NotificationConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated { + s.CloudFunctionConfiguration = v + return s +} + +// SetQueueConfiguration sets the QueueConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.QueueConfiguration = v + return s +} + +// SetTopicConfiguration sets the TopicConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.TopicConfiguration = v + return s +} + +// Specifies object key name filtering rules. For information about key name +// filtering, see Configuring event notifications using object key name filtering +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) +// in the Amazon S3 User Guide. +type NotificationConfigurationFilter struct { + _ struct{} `type:"structure"` + + // A container for object key name prefix and suffix filtering rules. + Key *KeyFilter `locationName:"S3Key" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotificationConfigurationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotificationConfigurationFilter) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter { + s.Key = v + return s +} + +// An object consists of data and its descriptive metadata. +type Object struct { + _ struct{} `type:"structure"` + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm []*string `type:"list" flattened:"true" enum:"ChecksumAlgorithm"` + + // The entity tag is a hash of the object. The ETag reflects changes only to + // the contents of an object, not its metadata. The ETag may or may not be an + // MD5 digest of the object data. Whether or not it is depends on how the object + // was created and how it is encrypted as described below: + // + // * Objects created by the PUT Object, POST Object, or Copy operation, or + // through the Amazon Web Services Management Console, and are encrypted + // by SSE-S3 or plaintext, have ETags that are an MD5 digest of their object + // data. + // + // * Objects created by the PUT Object, POST Object, or Copy operation, or + // through the Amazon Web Services Management Console, and are encrypted + // by SSE-C or SSE-KMS, have ETags that are not an MD5 digest of their object + // data. + // + // * If an object is created by either the Multipart Upload or Part Copy + // operation, the ETag is not an MD5 digest, regardless of the method of + // encryption. 
If an object is larger than 16 MB, the Amazon Web Services
+ // Management Console will upload or copy that object as a Multipart Upload,
+ // and therefore the ETag will not be an MD5 digest.
+ //
+ // Directory buckets - MD5 is not supported by directory buckets.
+ ETag *string `type:"string"`
+
+ // The name that you assign to an object. You use the object key to retrieve
+ // the object.
+ Key *string `min:"1" type:"string"`
+
+ // Creation date of the object.
+ LastModified *time.Time `type:"timestamp"`
+
+ // The owner of the object.
+ //
+ // Directory buckets - The bucket owner is returned as the object owner.
+ Owner *Owner `type:"structure"`
+
+ // Specifies the restoration status of an object. Objects in certain storage
+ // classes must be restored before they can be retrieved. For more information
+ // about these storage classes and how to work with archived objects, see Working
+ // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets. Only the S3 Express
+ // One Zone storage class is supported by directory buckets to store objects.
+ RestoreStatus *RestoreStatus `type:"structure"`
+
+ // Size in bytes of the object.
+ Size *int64 `type:"long"`
+
+ // The class of storage used to store the object.
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported
+ // by directory buckets to store objects.
+ StorageClass *string `type:"string" enum:"ObjectStorageClass"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Object) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Object) GoString() string {
+ return s.String()
+}
+
+// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value.
+func (s *Object) SetChecksumAlgorithm(v []*string) *Object {
+ s.ChecksumAlgorithm = v
+ return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *Object) SetETag(v string) *Object {
+ s.ETag = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Object) SetKey(v string) *Object {
+ s.Key = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *Object) SetLastModified(v time.Time) *Object {
+ s.LastModified = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *Object) SetOwner(v *Owner) *Object {
+ s.Owner = v
+ return s
+}
+
+// SetRestoreStatus sets the RestoreStatus field's value.
+func (s *Object) SetRestoreStatus(v *RestoreStatus) *Object {
+ s.RestoreStatus = v
+ return s
+}
+
+// SetSize sets the Size field's value.
+func (s *Object) SetSize(v int64) *Object {
+ s.Size = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Object) SetStorageClass(v string) *Object {
+ s.StorageClass = &v
+ return s
+}
+
+// Object Identifier is a unique value to identify objects.
+type ObjectIdentifier struct {
+ _ struct{} `type:"structure"`
+
+ // Key name of the object.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ //
+ // Key is a required field
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // Version ID for the specific version of the object to delete.
+ //
+ // This functionality is not supported for directory buckets.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ObjectIdentifier) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ObjectIdentifier) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ObjectIdentifier) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier {
+ s.VersionId = &v
+ return s
+}
+
+// The container element for Object Lock configuration parameters.
+type ObjectLockConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether this bucket has an Object Lock configuration enabled. Enable
+ // ObjectLockEnabled when you apply ObjectLockConfiguration to a bucket.
+ ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"`
+
+ // Specifies the Object Lock rule for the specified object. Enable this
+ // rule when you apply ObjectLockConfiguration to a bucket. Bucket settings
+ // require both a mode and a period. The period can be either Days or Years
+ // but you must select one. You cannot specify Days and Years at the same time.
+ Rule *ObjectLockRule `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ObjectLockConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ObjectLockConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetObjectLockEnabled sets the ObjectLockEnabled field's value.
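+//
+// A minimal illustrative sketch (not part of the generated SDK): enabling
+// Object Lock with a 30-day GOVERNANCE default retention; the mode and day
+// count are hypothetical.
+//
+//	olc := (&ObjectLockConfiguration{}).
+//		SetObjectLockEnabled("Enabled").
+//		SetRule((&ObjectLockRule{}).SetDefaultRetention(
+//			(&DefaultRetention{}).SetMode("GOVERNANCE").SetDays(30)))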
+func (s *ObjectLockConfiguration) SetObjectLockEnabled(v string) *ObjectLockConfiguration { + s.ObjectLockEnabled = &v + return s +} + +// SetRule sets the Rule field's value. +func (s *ObjectLockConfiguration) SetRule(v *ObjectLockRule) *ObjectLockConfiguration { + s.Rule = v + return s +} + +// A legal hold configuration for an object. +type ObjectLockLegalHold struct { + _ struct{} `type:"structure"` + + // Indicates whether the specified object has a legal hold in place. + Status *string `type:"string" enum:"ObjectLockLegalHoldStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectLockLegalHold) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectLockLegalHold) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *ObjectLockLegalHold) SetStatus(v string) *ObjectLockLegalHold { + s.Status = &v + return s +} + +// A Retention configuration for an object. +type ObjectLockRetention struct { + _ struct{} `type:"structure"` + + // Indicates the Retention mode for the specified object. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The date on which this Object Lock Retention will expire. + RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectLockRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectLockRetention) GoString() string { + return s.String() +} + +// SetMode sets the Mode field's value. +func (s *ObjectLockRetention) SetMode(v string) *ObjectLockRetention { + s.Mode = &v + return s +} + +// SetRetainUntilDate sets the RetainUntilDate field's value. +func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetention { + s.RetainUntilDate = &v + return s +} + +// The container element for an Object Lock rule. +type ObjectLockRule struct { + _ struct{} `type:"structure"` + + // The default Object Lock retention mode and period that you want to apply + // to new objects placed in the specified bucket. Bucket settings require both + // a mode and a period. The period can be either Days or Years but you must + // select one. You cannot specify Days and Years at the same time. + DefaultRetention *DefaultRetention `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
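+//
+// A minimal illustrative sketch (not part of the generated SDK): a retention
+// that locks an object version until a fixed date; the date and mode are
+// hypothetical.
+//
+//	ret := (&ObjectLockRetention{}).
+//		SetMode("COMPLIANCE").
+//		SetRetainUntilDate(time.Date(2030, time.January, 1, 0, 0, 0, 0, time.UTC))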
+func (s ObjectLockRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectLockRule) GoString() string { + return s.String() +} + +// SetDefaultRetention sets the DefaultRetention field's value. +func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRule { + s.DefaultRetention = v + return s +} + +// A container for elements related to an individual part. +type ObjectPart struct { + _ struct{} `type:"structure"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + + // The part number identifying the part. This value is a positive integer between + // 1 and 10,000. + PartNumber *int64 `type:"integer"` + + // The size of the uploaded part in bytes. + Size *int64 `type:"long"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectPart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectPart) GoString() string { + return s.String() +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *ObjectPart) SetChecksumCRC32(v string) *ObjectPart { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *ObjectPart) SetChecksumCRC32C(v string) *ObjectPart { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *ObjectPart) SetChecksumSHA1(v string) *ObjectPart { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *ObjectPart) SetChecksumSHA256(v string) *ObjectPart { + s.ChecksumSHA256 = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *ObjectPart) SetPartNumber(v int64) *ObjectPart { + s.PartNumber = &v + return s +} + +// SetSize sets the Size field's value. +func (s *ObjectPart) SetSize(v int64) *ObjectPart { + s.Size = &v + return s +} + +// The version of an object. +type ObjectVersion struct { + _ struct{} `type:"structure"` + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm []*string `type:"list" flattened:"true" enum:"ChecksumAlgorithm"` + + // The entity tag is an MD5 hash of that version of the object. + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time when the object was last modified. + LastModified *time.Time `type:"timestamp"` + + // Specifies the owner of the object. + Owner *Owner `type:"structure"` + + // Specifies the restoration status of an object. Objects in certain storage + // classes must be restored before they can be retrieved. For more information + // about these storage classes and how to work with archived objects, see Working + // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) + // in the Amazon S3 User Guide. + RestoreStatus *RestoreStatus `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"long"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
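+//
+// A minimal illustrative sketch (not part of the generated SDK): ObjectVersion
+// values are returned by ListObjectVersions; svc (an *S3 client) and the
+// bucket name are hypothetical.
+//
+//	out, err := svc.ListObjectVersions(&ListObjectVersionsInput{
+//		Bucket: aws.String("my-bucket"),
+//	})
+//	if err == nil {
+//		for _, v := range out.Versions {
+//			fmt.Println(aws.StringValue(v.Key), aws.StringValue(v.VersionId))
+//		}
+//	}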
+func (s ObjectVersion) GoString() string { + return s.String() +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *ObjectVersion) SetChecksumAlgorithm(v []*string) *ObjectVersion { + s.ChecksumAlgorithm = v + return s +} + +// SetETag sets the ETag field's value. +func (s *ObjectVersion) SetETag(v string) *ObjectVersion { + s.ETag = &v + return s +} + +// SetIsLatest sets the IsLatest field's value. +func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { + s.IsLatest = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ObjectVersion) SetKey(v string) *ObjectVersion { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { + s.Owner = v + return s +} + +// SetRestoreStatus sets the RestoreStatus field's value. +func (s *ObjectVersion) SetRestoreStatus(v *RestoreStatus) *ObjectVersion { + s.RestoreStatus = v + return s +} + +// SetSize sets the Size field's value. +func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { + s.VersionId = &v + return s +} + +// Describes the location where the restore job's output is stored. +type OutputLocation struct { + _ struct{} `type:"structure"` + + // Describes an S3 location that will receive the results of the restore request. + S3 *Location `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputLocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OutputLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OutputLocation"} + if s.S3 != nil { + if err := s.S3.Validate(); err != nil { + invalidParams.AddNested("S3", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3 sets the S3 field's value. +func (s *OutputLocation) SetS3(v *Location) *OutputLocation { + s.S3 = v + return s +} + +// Describes how results of the Select job are serialized. +type OutputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of CSV-encoded Select results. + CSV *CSVOutput `type:"structure"` + + // Specifies JSON as request's output serialization format. + JSON *JSONOutput `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *OutputSerialization) SetCSV(v *CSVOutput) *OutputSerialization { + s.CSV = v + return s +} + +// SetJSON sets the JSON field's value. +func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { + s.JSON = v + return s +} + +// Container for the owner's display name and ID. +type Owner struct { + _ struct{} `type:"structure"` + + // Container for the display name of the owner. This value is only supported + // in the following Amazon Web Services Regions: + // + // * US East (N. Virginia) + // + // * US West (N. California) + // + // * US West (Oregon) + // + // * Asia Pacific (Singapore) + // + // * Asia Pacific (Sydney) + // + // * Asia Pacific (Tokyo) + // + // * Europe (Ireland) + // + // * South America (São Paulo) + // + // This functionality is not supported for directory buckets. + DisplayName *string `type:"string"` + + // Container for the ID of the owner. + ID *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Owner) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Owner) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Owner) SetDisplayName(v string) *Owner { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. +func (s *Owner) SetID(v string) *Owner { + s.ID = &v + return s +} + +// The container element for a bucket's ownership controls. +type OwnershipControls struct { + _ struct{} `type:"structure"` + + // The container element for an ownership control rule. + // + // Rules is a required field + Rules []*OwnershipControlsRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OwnershipControls) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OwnershipControls) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
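+//
+// A minimal illustrative sketch (not part of the generated SDK): controls with
+// a single BucketOwnerEnforced rule, validated before use.
+//
+//	oc := (&OwnershipControls{}).SetRules([]*OwnershipControlsRule{
+//		(&OwnershipControlsRule{}).SetObjectOwnership("BucketOwnerEnforced"),
+//	})
+//	if err := oc.Validate(); err != nil {
+//		// handle invalid parameters
+//	}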
+func (s *OwnershipControls) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OwnershipControls"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *OwnershipControls) SetRules(v []*OwnershipControlsRule) *OwnershipControls { + s.Rules = v + return s +} + +// The container element for an ownership control rule. +type OwnershipControlsRule struct { + _ struct{} `type:"structure"` + + // The container element for object ownership for a bucket's ownership controls. + // + // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to + // the bucket owner if the objects are uploaded with the bucket-owner-full-control + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. + // + // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer + // affect permissions. The bucket owner automatically owns and has full control + // over every object in the bucket. The bucket only accepts PUT requests that + // don't specify an ACL or specify bucket owner full control ACLs (such as the + // predefined bucket-owner-full-control canned ACL or a custom ACL in XML format + // that grants the same permissions). + // + // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled. + // We recommend keeping ACLs disabled, except in uncommon use cases where you + // must control access for each object individually. For more information about + // S3 Object Ownership, see Controlling ownership of objects and disabling ACLs + // for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // use the bucket owner enforced setting for S3 Object Ownership. + // + // ObjectOwnership is a required field + ObjectOwnership *string `type:"string" required:"true" enum:"ObjectOwnership"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OwnershipControlsRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OwnershipControlsRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OwnershipControlsRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OwnershipControlsRule"} + if s.ObjectOwnership == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectOwnership")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjectOwnership sets the ObjectOwnership field's value. 
+func (s *OwnershipControlsRule) SetObjectOwnership(v string) *OwnershipControlsRule { + s.ObjectOwnership = &v + return s +} + +// Container for Parquet. +type ParquetInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ParquetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ParquetInput) GoString() string { + return s.String() +} + +// Container for elements related to a part. +type Part struct { + _ struct{} `type:"structure"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 256-bit SHA-256 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp"` + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. 
+ PartNumber *int64 `type:"integer"` + + // Size in bytes of the uploaded part data. + Size *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Part) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Part) GoString() string { + return s.String() +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *Part) SetChecksumCRC32(v string) *Part { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *Part) SetChecksumCRC32C(v string) *Part { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *Part) SetChecksumSHA1(v string) *Part { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *Part) SetChecksumSHA256(v string) *Part { + s.ChecksumSHA256 = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *Part) SetETag(v string) *Part { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *Part) SetLastModified(v time.Time) *Part { + s.LastModified = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *Part) SetPartNumber(v int64) *Part { + s.PartNumber = &v + return s +} + +// SetSize sets the Size field's value. +func (s *Part) SetSize(v int64) *Part { + s.Size = &v + return s +} + +// Amazon S3 keys for log objects are partitioned in the following format: +// +// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +// +// PartitionedPrefix defaults to EventTime delivery when server access logs +// are delivered. +type PartitionedPrefix struct { + _ struct{} `locationName:"PartitionedPrefix" type:"structure"` + + // Specifies the partition date source for the partitioned prefix. PartitionDateSource + // can be EventTime or DeliveryTime. + PartitionDateSource *string `type:"string" enum:"PartitionDateSource"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PartitionedPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PartitionedPrefix) GoString() string { + return s.String() +} + +// SetPartitionDateSource sets the PartitionDateSource field's value. +func (s *PartitionedPrefix) SetPartitionDateSource(v string) *PartitionedPrefix { + s.PartitionDateSource = &v + return s +} + +// The container element for a bucket's policy status. +type PolicyStatus struct { + _ struct{} `type:"structure"` + + // The policy status for this bucket. TRUE indicates that this bucket is public. 
+ // FALSE indicates that the bucket is not public. + IsPublic *bool `locationName:"IsPublic" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PolicyStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PolicyStatus) GoString() string { + return s.String() +} + +// SetIsPublic sets the IsPublic field's value. +func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus { + s.IsPublic = &v + return s +} + +// This data type contains information about progress of an operation. +type Progress struct { + _ struct{} `type:"structure"` + + // The current number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` + + // The current number of bytes of records payload data returned. + BytesReturned *int64 `type:"long"` + + // The current number of object bytes scanned. + BytesScanned *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Progress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Progress) GoString() string { + return s.String() +} + +// SetBytesProcessed sets the BytesProcessed field's value. +func (s *Progress) SetBytesProcessed(v int64) *Progress { + s.BytesProcessed = &v + return s +} + +// SetBytesReturned sets the BytesReturned field's value. +func (s *Progress) SetBytesReturned(v int64) *Progress { + s.BytesReturned = &v + return s +} + +// SetBytesScanned sets the BytesScanned field's value. +func (s *Progress) SetBytesScanned(v int64) *Progress { + s.BytesScanned = &v + return s +} + +// This data type contains information about the progress event of an operation. +type ProgressEvent struct { + _ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"` + + // The Progress event details. + Details *Progress `locationName:"Details" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProgressEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProgressEvent) GoString() string { + return s.String() +} + +// SetDetails sets the Details field's value. 
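+//
+// A minimal illustrative sketch (not part of the generated SDK): progress
+// events arrive on the SelectObjectContent event stream; the stream variable
+// is hypothetical.
+//
+//	for ev := range stream.Events() {
+//		if pe, ok := ev.(*ProgressEvent); ok && pe.Details != nil {
+//			fmt.Println(aws.Int64Value(pe.Details.BytesScanned), "bytes scanned")
+//		}
+//	}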
+func (s *ProgressEvent) SetDetails(v *Progress) *ProgressEvent {
+ s.Details = v
+ return s
+}
+
+// The ProgressEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *ProgressEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the ProgressEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *ProgressEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ if err := payloadUnmarshaler.UnmarshalPayload(
+ bytes.NewReader(msg.Payload), s,
+ ); err != nil {
+ return err
+ }
+ return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+ msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+ var buf bytes.Buffer
+ if err = pm.MarshalPayload(&buf, s); err != nil {
+ return eventstream.Message{}, err
+ }
+ msg.Payload = buf.Bytes()
+ return msg, err
+}
+
+// The PublicAccessBlock configuration that you want to apply to this Amazon
+// S3 bucket. You can enable the configuration options in any combination. For
+// more information about when Amazon S3 considers a bucket or object public,
+// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
+// in the Amazon S3 User Guide.
+type PublicAccessBlockConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether Amazon S3 should block public access control lists (ACLs)
+ // for this bucket and objects in this bucket. Setting this element to TRUE
+ // causes the following behavior:
+ //
+ // * PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is
+ // public.
+ //
+ // * PUT Object calls fail if the request includes a public ACL.
+ //
+ // * PUT Bucket calls fail if the request includes a public ACL.
+ //
+ // Enabling this setting doesn't affect existing policies or ACLs.
+ BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"`
+
+ // Specifies whether Amazon S3 should block public bucket policies for this
+ // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to
+ // PUT Bucket policy if the specified bucket policy allows public access.
+ //
+ // Enabling this setting doesn't affect existing bucket policies.
+ BlockPublicPolicy *bool `locationName:"BlockPublicPolicy" type:"boolean"`
+
+ // Specifies whether Amazon S3 should ignore public ACLs for this bucket and
+ // objects in this bucket. Setting this element to TRUE causes Amazon S3 to
+ // ignore all public ACLs on this bucket and objects in this bucket.
+ //
+ // Enabling this setting doesn't affect the persistence of any existing ACLs
+ // and doesn't prevent new public ACLs from being set.
+ IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"`
+
+ // Specifies whether Amazon S3 should restrict public bucket policies for this
+ // bucket. Setting this element to TRUE restricts access to this bucket to only
+ // Amazon Web Service principals and authorized users within this account if
+ // the bucket has a public policy.
+ //
+ // Enabling this setting doesn't affect previously stored bucket policies, except
+ // that public and cross-account access within any public bucket policy, including
+ // non-public delegation to specific accounts, is blocked.
+ RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PublicAccessBlockConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PublicAccessBlockConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetBlockPublicAcls sets the BlockPublicAcls field's value.
+func (s *PublicAccessBlockConfiguration) SetBlockPublicAcls(v bool) *PublicAccessBlockConfiguration {
+ s.BlockPublicAcls = &v
+ return s
+}
+
+// SetBlockPublicPolicy sets the BlockPublicPolicy field's value.
+func (s *PublicAccessBlockConfiguration) SetBlockPublicPolicy(v bool) *PublicAccessBlockConfiguration {
+ s.BlockPublicPolicy = &v
+ return s
+}
+
+// SetIgnorePublicAcls sets the IgnorePublicAcls field's value.
+func (s *PublicAccessBlockConfiguration) SetIgnorePublicAcls(v bool) *PublicAccessBlockConfiguration {
+ s.IgnorePublicAcls = &v
+ return s
+}
+
+// SetRestrictPublicBuckets sets the RestrictPublicBuckets field's value.
+func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *PublicAccessBlockConfiguration {
+ s.RestrictPublicBuckets = &v
+ return s
+}
+
+type PutBucketAccelerateConfigurationInput struct {
+ _ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"`
+
+ // Container for setting the transfer acceleration state.
+ //
+ // AccelerateConfiguration is a required field
+ AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // The name of the bucket for which the accelerate configuration is set.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} + if s.AccelerateConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccelerateConfiguration sets the AccelerateConfiguration field's value. +func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput { + s.AccelerateConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketAccelerateConfigurationInput) SetChecksumAlgorithm(v string) *PutBucketAccelerateConfigurationInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAccelerateConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketAccelerateConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *PutBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
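+//
+// For illustration only (the ARN and value below are assumptions): because the
+// receiver is a value, the backfill leaves the caller's input untouched and
+// returns a modified copy:
+//
+//    in := &PutBucketAccelerateConfigurationInput{
+//        Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap"),
+//    }
+//    out, _ := in.updateArnableField("my-ap")
+//    // in.Bucket still holds the ARN; the copy in out holds "my-ap".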
+func (s PutBucketAccelerateConfigurationInput) updateArnableField(v string) (interface{}, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ s.Bucket = aws.String(v)
+ return &s, nil
+}
+
+type PutBucketAccelerateConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketAccelerateConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketAccelerateConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketAclInput struct {
+ _ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"`
+
+ // The canned ACL to apply to the bucket.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
+
+ // Contains the elements that set the ACL permissions for an object per grantee.
+ AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // The bucket to which to apply the ACL.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions + // and overwrites of those objects. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketAclInput) SetChecksumAlgorithm(v string) *PutBucketAclInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAclInput) SetExpectedBucketOwner(v string) *PutBucketAclInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. 
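+//
+// A hedged usage sketch (grantee values are assumptions): grant headers are
+// plain strings in the key="value" form S3 expects, so an ACL request can be
+// assembled with the chained setters:
+//
+//    input := (&PutBucketAclInput{}).
+//        SetBucket("my-bucket").
+//        SetGrantRead(`uri="http://acs.amazonaws.com/groups/global/AllUsers"`).
+//        SetGrantReadACP(`id="CANONICAL-USER-ID"`)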
+func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { + s.GrantWriteACP = &v + return s +} + +func (s *PutBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAclOutput) GoString() string { + return s.String() +} + +type PutBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"PutBucketAnalyticsConfigurationRequest" type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. + // + // AnalyticsConfiguration is a required field + AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The name of the bucket to which an analytics configuration is stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"} + if s.AnalyticsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.AnalyticsConfiguration != nil { + if err := s.AnalyticsConfiguration.Validate(); err != nil { + invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput { + s.AnalyticsConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +func (s *PutBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketAnalyticsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketAnalyticsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketCorsInput struct {
+ _ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"`
+
+ // Specifies the bucket impacted by the CORS configuration.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Describes the cross-origin access configuration for objects in an Amazon
+ // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon
+ // S3 User Guide.
+ //
+ // CORSConfiguration is a required field
+ CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketCorsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CORSConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) + } + if s.CORSConfiguration != nil { + if err := s.CORSConfiguration.Validate(); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *PutBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCORSConfiguration sets the CORSConfiguration field's value. +func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { + s.CORSConfiguration = v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketCorsInput) SetChecksumAlgorithm(v string) *PutBucketCorsInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketCorsInput) SetExpectedBucketOwner(v string) *PutBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *PutBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketCorsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketCorsOutput) GoString() string { + return s.String() +} + +type PutBucketEncryptionInput struct { + _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Specifies default encryption for a bucket using server-side encryption with + // different key options. 
By default, all buckets have a default encryption
+ // configuration that uses server-side encryption with Amazon S3 managed keys
+ // (SSE-S3). You can optionally configure default encryption for a bucket by
+ // using server-side encryption with an Amazon Web Services KMS key (SSE-KMS)
+ // or a customer-provided key (SSE-C). For information about the bucket default
+ // encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Specifies the default server-side-encryption configuration.
+ //
+ // ServerSideEncryptionConfiguration is a required field
+ ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketEncryptionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketEncryptionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
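+//
+// A brief sketch (bucket name assumed): Validate runs automatically when the
+// request is sent, but it can also be called directly to surface missing
+// required members before any network call:
+//
+//    in := &PutBucketEncryptionInput{Bucket: aws.String("my-bucket")}
+//    if err := in.Validate(); err != nil {
+//        // err is a request.ErrInvalidParams naming the missing
+//        // ServerSideEncryptionConfiguration member.
+//    }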
+func (s *PutBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.ServerSideEncryptionConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration")) + } + if s.ServerSideEncryptionConfiguration != nil { + if err := s.ServerSideEncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketEncryptionInput) SetBucket(v string) *PutBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *PutBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketEncryptionInput) SetChecksumAlgorithm(v string) *PutBucketEncryptionInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketEncryptionInput) SetExpectedBucketOwner(v string) *PutBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *PutBucketEncryptionInput { + s.ServerSideEncryptionConfiguration = v + return s +} + +func (s *PutBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutBucketEncryptionOutput) GoString() string { + return s.String() +} + +type PutBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"PutBucketIntelligentTieringConfigurationRequest" type:"structure" payload:"IntelligentTieringConfiguration"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Container for S3 Intelligent-Tiering configuration. + // + // IntelligentTieringConfiguration is a required field + IntelligentTieringConfiguration *IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.IntelligentTieringConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("IntelligentTieringConfiguration")) + } + if s.IntelligentTieringConfiguration != nil { + if err := s.IntelligentTieringConfiguration.Validate(); err != nil { + invalidParams.AddNested("IntelligentTieringConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetBucket(v string) *PutBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetId(v string) *PutBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value. 
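+//
+// Illustrative only (identifiers are assumptions): the setters return the
+// receiver, so the request input can be assembled in one chain:
+//
+//    cfg := &IntelligentTieringConfiguration{} // populated elsewhere
+//    in := (&PutBucketIntelligentTieringConfigurationInput{}).
+//        SetBucket("my-bucket").
+//        SetId("tier-config-1").
+//        SetIntelligentTieringConfiguration(cfg)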
+func (s *PutBucketIntelligentTieringConfigurationInput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *PutBucketIntelligentTieringConfigurationInput { + s.IntelligentTieringConfiguration = v + return s +} + +func (s *PutBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"` + + // The name of the bucket where the inventory configuration will be stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the inventory configuration. + // + // InventoryConfiguration is a required field + InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.InventoryConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration")) + } + if s.InventoryConfiguration != nil { + if err := s.InventoryConfiguration.Validate(); err != nil { + invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput { + s.InventoryConfiguration = v + return s +} + +func (s *PutBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutBucketInventoryConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketInventoryConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketLifecycleConfigurationInput struct {
+ _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"`
+
+ // The name of the bucket for which to set the configuration.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Container for lifecycle rules. You can add as many as 1,000 rules.
+ LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLifecycleConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLifecycleConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
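+//
+// A hedged sketch (values assumed): nested members are validated as well, so
+// an empty LifecycleConfiguration surfaces through the same error value:
+//
+//    in := &PutBucketLifecycleConfigurationInput{
+//        Bucket:                 aws.String("my-bucket"),
+//        LifecycleConfiguration: &BucketLifecycleConfiguration{},
+//    }
+//    err := in.Validate() // reports that LifecycleConfiguration.Rules is required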
+func (s *PutBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketLifecycleConfigurationInput) SetChecksumAlgorithm(v string) *PutBucketLifecycleConfigurationInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. +func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { + s.LifecycleConfiguration = v + return s +} + +func (s *PutBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutBucketLifecycleConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketLifecycleInput struct {
+ _ struct{} `locationName:"PutBucketLifecycleRequest" type:"structure" payload:"LifecycleConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Container for lifecycle rules. You can add as many as 1,000 rules.
+ //
+ // For more information, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html)
+ // in the Amazon S3 User Guide.
+ LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLifecycleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLifecycleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketLifecycleInput) SetChecksumAlgorithm(v string) *PutBucketLifecycleInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLifecycleInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. +func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { + s.LifecycleConfiguration = v + return s +} + +func (s *PutBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLifecycleOutput) GoString() string { + return s.String() +} + +type PutBucketLoggingInput struct { + _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"` + + // The name of the bucket for which to set the logging parameters. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for logging status information. 
+ //
+ // BucketLoggingStatus is a required field
+ BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLoggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketLoggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketLoggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.BucketLoggingStatus == nil {
+ invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus"))
+ }
+ if s.BucketLoggingStatus != nil {
+ if err := s.BucketLoggingStatus.Validate(); err != nil {
+ invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
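+//
+// Because every setter returns its receiver, calls can be chained. A hedged
+// sketch: the bucket name is a placeholder and status is assumed to be a
+// *BucketLoggingStatus built elsewhere.
+//
+//    input := new(PutBucketLoggingInput).
+//        SetBucket("example-bucket").
+//        SetBucketLoggingStatus(status)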
+func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketLoggingStatus sets the BucketLoggingStatus field's value. +func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { + s.BucketLoggingStatus = v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketLoggingInput) SetChecksumAlgorithm(v string) *PutBucketLoggingInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLoggingInput) SetExpectedBucketOwner(v string) *PutBucketLoggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *PutBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLoggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLoggingOutput) GoString() string { + return s.String() +} + +type PutBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"PutBucketMetricsConfigurationRequest" type:"structure" payload:"MetricsConfiguration"` + + // The name of the bucket for which the metrics configuration is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the metrics configuration. 
+ // + // MetricsConfiguration is a required field + MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.MetricsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration")) + } + if s.MetricsConfiguration != nil { + if err := s.MetricsConfiguration.Validate(); err != nil { + invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput { + s.MetricsConfiguration = v + return s +} + +func (s *PutBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
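+//
+// The copy comes from the value receiver: s is already a copy of the caller's
+// struct, so assigning to s.Bucket and returning &s cannot change the original.
+// A hedged, package-internal sketch with placeholder values:
+//
+//    in := PutBucketMetricsConfigurationInput{Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap")}
+//    out, _ := in.updateArnableField("my-ap")
+//    // *in.Bucket still holds the ARN; out.(*PutBucketMetricsConfigurationInput).Bucket now holds "my-ap"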
+func (s PutBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationInput struct { + _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"` + + // The name of the bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // A container for specifying the notification configuration of the bucket. + // If this element is empty, notifications are turned off for the bucket. + // + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True + // or false value. + SkipDestinationValidation *bool `location:"header" locationName:"x-amz-skip-destination-validation" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketNotificationConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
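+//
+// A hedged sketch with a placeholder bucket name: per the field docs above,
+// an empty NotificationConfiguration turns notifications off for the bucket,
+// and it should still satisfy Validate because both required members are set.
+//
+//    input := &PutBucketNotificationConfigurationInput{}
+//    input.SetBucket("example-bucket")
+//    input.SetNotificationConfiguration(&NotificationConfiguration{})
+//    err := input.Validate() // expected to be nil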
+func (s *PutBucketNotificationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + if s.NotificationConfiguration != nil { + if err := s.NotificationConfiguration.Validate(); err != nil { + invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketNotificationConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketNotificationConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { + s.NotificationConfiguration = v + return s +} + +// SetSkipDestinationValidation sets the SkipDestinationValidation field's value. +func (s *PutBucketNotificationConfigurationInput) SetSkipDestinationValidation(v bool) *PutBucketNotificationConfigurationInput { + s.SkipDestinationValidation = &v + return s +} + +func (s *PutBucketNotificationConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketNotificationConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutBucketNotificationConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketNotificationInput struct {
+ _ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"`
+
+ // The name of the bucket.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The container for the configuration.
+ //
+ // NotificationConfiguration is a required field
+ NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketNotificationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketNotificationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketNotificationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketNotificationInput) SetChecksumAlgorithm(v string) *PutBucketNotificationInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketNotificationInput) SetExpectedBucketOwner(v string) *PutBucketNotificationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { + s.NotificationConfiguration = v + return s +} + +func (s *PutBucketNotificationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketNotificationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketNotificationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketNotificationOutput) GoString() string { + return s.String() +} + +type PutBucketOwnershipControlsInput struct { + _ struct{} `locationName:"PutBucketOwnershipControlsRequest" type:"structure" payload:"OwnershipControls"` + + // The name of the Amazon S3 bucket whose OwnershipControls you want to set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) + // that you want to apply to this Amazon S3 bucket. + // + // OwnershipControls is a required field + OwnershipControls *OwnershipControls `locationName:"OwnershipControls" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketOwnershipControlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketOwnershipControlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketOwnershipControlsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.OwnershipControls == nil { + invalidParams.Add(request.NewErrParamRequired("OwnershipControls")) + } + if s.OwnershipControls != nil { + if err := s.OwnershipControls.Validate(); err != nil { + invalidParams.AddNested("OwnershipControls", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketOwnershipControlsInput) SetBucket(v string) *PutBucketOwnershipControlsInput { + s.Bucket = &v + return s +} + +func (s *PutBucketOwnershipControlsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *PutBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetOwnershipControls sets the OwnershipControls field's value. +func (s *PutBucketOwnershipControlsInput) SetOwnershipControls(v *OwnershipControls) *PutBucketOwnershipControlsInput { + s.OwnershipControls = v + return s +} + +func (s *PutBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketOwnershipControlsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s PutBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ s.Bucket = aws.String(v)
+ return &s, nil
+}
+
+type PutBucketOwnershipControlsOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketOwnershipControlsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketOwnershipControlsOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketPolicyInput struct {
+ _ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"`
+
+ // The name of the bucket.
+ //
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name.
+ // Virtual-hosted-style requests aren't supported. Directory bucket names
+ // must be unique in the chosen Availability Zone. Bucket names must also follow
+ // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3).
+ // For information about bucket naming restrictions, see Directory bucket naming
+ // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon
+ // S3 fails the request with the HTTP status code 400 Bad Request.
+ //
+ // For the x-amz-checksum-algorithm header, replace algorithm with the supported
+ // algorithm from the following list:
+ //
+ // * CRC32
+ //
+ // * CRC32C
+ //
+ // * SHA1
+ //
+ // * SHA256
+ //
+ // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If the individual checksum value you provide through x-amz-checksum-algorithm
+ // doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm,
+ // Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum
+ // algorithm that matches the provided value in x-amz-checksum-algorithm.
+ //
+ // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the
+ // default checksum algorithm that's used for performance.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // Set this parameter to true to confirm that you want to remove your permissions
+ // to change this bucket policy in the future.
+ //
+ // This functionality is not supported for directory buckets.
+ ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"`
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ //
+ // For directory buckets, this header is not supported in this API operation.
+ // If you specify this header, the request fails with the HTTP status code 501
+ // Not Implemented.
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The bucket policy as a JSON document.
+ //
+ // For directory buckets, the only IAM action supported in the bucket policy
+ // is s3express:CreateSession.
+ //
+ // Policy is a required field
+ Policy *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Policy == nil {
+ invalidParams.Add(request.NewErrParamRequired("Policy"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketPolicyInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value.
+func (s *PutBucketPolicyInput) SetChecksumAlgorithm(v string) *PutBucketPolicyInput {
+ s.ChecksumAlgorithm = &v
+ return s
+}
+
+// SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value.
+func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput {
+ s.ConfirmRemoveSelfBucketAccess = &v
+ return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
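+//
+// A hedged sketch of assembling a policy input; the bucket name and the JSON
+// document are placeholders (a real policy would carry actual statements):
+//
+//    policy := `{"Version":"2012-10-17","Statement":[]}`
+//    input := new(PutBucketPolicyInput).
+//        SetBucket("example-bucket").
+//        SetPolicy(policy)
+//    err := input.Validate() // nil once Bucket and Policy are both set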
+func (s *PutBucketPolicyInput) SetExpectedBucketOwner(v string) *PutBucketPolicyInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput {
+ s.Policy = &v
+ return s
+}
+
+func (s *PutBucketPolicyInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketPolicyInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
+// updateArnableField updates the value of the input field that
+// takes an ARN as an input. This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s PutBucketPolicyInput) updateArnableField(v string) (interface{}, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ s.Bucket = aws.String(v)
+ return &s, nil
+}
+
+type PutBucketPolicyOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketPolicyOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketReplicationInput struct {
+ _ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"`
+
+ // The name of the bucket.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner.
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. + // + // ReplicationConfiguration is a required field + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // A token to allow Object Lock to be enabled for an existing bucket. + Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.ReplicationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) + } + if s.ReplicationConfiguration != nil { + if err := s.ReplicationConfiguration.Validate(); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketReplicationInput) SetChecksumAlgorithm(v string) *PutBucketReplicationInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketReplicationInput) SetExpectedBucketOwner(v string) *PutBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { + s.ReplicationConfiguration = v + return s +} + +// SetToken sets the Token field's value. 
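+//
+// Token maps to the x-amz-bucket-object-lock-token header used when enabling
+// Object Lock on an existing bucket. A hedged sketch with placeholder values,
+// where cfg is assumed to be a *ReplicationConfiguration built elsewhere:
+//
+//    input := new(PutBucketReplicationInput).
+//        SetBucket("example-bucket").
+//        SetReplicationConfiguration(cfg).
+//        SetToken("example-object-lock-token")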
+func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInput {
+ s.Token = &v
+ return s
+}
+
+func (s *PutBucketReplicationInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutBucketReplicationInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
+// updateArnableField updates the value of the input field that
+// takes an ARN as an input. This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s PutBucketReplicationInput) updateArnableField(v string) (interface{}, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ s.Bucket = aws.String(v)
+ return &s, nil
+}
+
+type PutBucketReplicationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketReplicationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketReplicationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketRequestPaymentInput struct {
+ _ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"`
+
+ // The bucket name.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner.
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for Payer. + // + // RequestPaymentConfiguration is a required field + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.RequestPaymentConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) + } + if s.RequestPaymentConfiguration != nil { + if err := s.RequestPaymentConfiguration.Validate(); err != nil { + invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +func (s *PutBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketRequestPaymentInput) SetChecksumAlgorithm(v string) *PutBucketRequestPaymentInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketRequestPaymentInput) SetExpectedBucketOwner(v string) *PutBucketRequestPaymentInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. +func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { + s.RequestPaymentConfiguration = v + return s +} + +func (s *PutBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketRequestPaymentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s PutBucketRequestPaymentInput) updateArnableField(v string) (interface{}, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ s.Bucket = aws.String(v)
+ return &s, nil
+}
+
+type PutBucketRequestPaymentOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketRequestPaymentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketRequestPaymentOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketTaggingInput struct {
+ _ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"`
+
+ // The bucket name.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Container for the TagSet and Tag elements.
+ //
+ // Tagging is a required field
+ Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output.
The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *PutBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketTaggingInput) SetChecksumAlgorithm(v string) *PutBucketTaggingInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketTaggingInput) SetExpectedBucketOwner(v string) *PutBucketTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { + s.Tagging = v + return s +} + +func (s *PutBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketTaggingOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketVersioningInput struct {
+ _ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"`
+
+ // The bucket name.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device.
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+ // Container for setting the versioning state.
+ //
+ // VersioningConfiguration is a required field
+ VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketVersioningInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutBucketVersioningInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
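+//
+// A hedged sketch of enabling versioning; the bucket name, MFA serial, and
+// code are placeholders, and the "Enabled" status string plus the Status
+// member of VersioningConfiguration are assumed from the wider file:
+//
+//    input := new(PutBucketVersioningInput).
+//        SetBucket("example-bucket").
+//        SetMFA("arn:aws:iam::123456789012:mfa/device 123456").
+//        SetVersioningConfiguration(&VersioningConfiguration{
+//            Status: aws.String("Enabled"),
+//        })
+//    err := input.Validate() // nil: both required members are present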
+func (s *PutBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.VersioningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput { + s.Bucket = &v + return s +} + +func (s *PutBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketVersioningInput) SetChecksumAlgorithm(v string) *PutBucketVersioningInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketVersioningInput) SetExpectedBucketOwner(v string) *PutBucketVersioningInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { + s.MFA = &v + return s +} + +// SetVersioningConfiguration sets the VersioningConfiguration field's value. +func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput { + s.VersioningConfiguration = v + return s +} + +func (s *PutBucketVersioningInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketVersioningInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketVersioningInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketVersioningOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketVersioningOutput) GoString() string { + return s.String() +} + +type PutBucketWebsiteInput struct { + _ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"` + + // The bucket name. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatically computing the request + // payload checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure an alternative checksum + // algorithm to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for the request. + // + // WebsiteConfiguration is a required field + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.WebsiteConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) + } + if s.WebsiteConfiguration != nil { + if err := s.WebsiteConfiguration.Validate(); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value.
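+//
+// Editor's note (an illustrative sketch, not vendored SDK text): the setters
+// on this type return the receiver, so a website configuration can be
+// assembled fluently; the bucket name and suffix below are placeholders.
+//
+//	input := new(PutBucketWebsiteInput).
+//	    SetBucket("example-bucket").
+//	    SetWebsiteConfiguration(&WebsiteConfiguration{
+//	        IndexDocument: &IndexDocument{Suffix: aws.String("index.html")},
+//	    })
+//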
+func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *PutBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketWebsiteInput) SetChecksumAlgorithm(v string) *PutBucketWebsiteInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketWebsiteInput) SetExpectedBucketOwner(v string) *PutBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. +func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { + s.WebsiteConfiguration = v + return s +} + +func (s *PutBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketWebsiteOutput) GoString() string { + return s.String() +} + +type PutObjectAclInput struct { + _ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Contains the elements that set the ACL permissions for an object per grantee. + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The bucket name that contains the object to which you want to attach the + // ACL. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatically computing the request + // payload checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure an alternative checksum + // algorithm to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + // + // This functionality is not supported for Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + // + // This functionality is not supported for Amazon S3 on Outposts. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + // + // This functionality is not supported for Amazon S3 on Outposts. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create new objects in the bucket.
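+	//
+	// (Editor's illustrative note, not upstream text.) The grant headers take a
+	// comma-separated grantee list; a predefined-group grantee, for example,
+	// looks like:
+	//
+	//	GrantWrite: aws.String(`uri="http://acs.amazonaws.com/groups/global/AuthenticatedUsers"`),
+	//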
+ // + // For the bucket and object owners of existing objects, also allows deletions + // and overwrites of those objects. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + // + // This functionality is not supported for Amazon S3 on Outposts. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Key for which the PUT action was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Version ID used to reference a specific version of the object. + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. 
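+//
+// Editor's note (an illustrative sketch only; the bucket and key are
+// placeholders): a canned ACL can be applied to an object by populating just
+// three members of this type:
+//
+//	input := &PutObjectAclInput{
+//	    Bucket: aws.String("example-bucket"),
+//	    Key:    aws.String("example-key"),
+//	    ACL:    aws.String(ObjectCannedACLPublicRead),
+//	}
+//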
+func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { + s.Bucket = &v + return s +} + +func (s *PutObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutObjectAclInput) SetChecksumAlgorithm(v string) *PutObjectAclInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectAclInput) SetExpectedBucketOwner(v string) *PutObjectAclInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { + s.VersionId = &v + return s +} + +func (s *PutObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectAclOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectAclOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { + s.RequestCharged = &v + return s +} + +type PutObjectInput struct { + _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // in the Amazon S3 User Guide. + // + // When adding a new object, you can use headers to grant ACL-based permissions + // to individual Amazon Web Services accounts or to predefined groups defined + // by Amazon S3. These permissions are then added to the ACL on the object. + // By default, all objects are private. Only the owner has full access control. + // For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html) + // in the Amazon S3 User Guide. + // + // If the bucket that you're uploading objects to uses the bucket owner enforced + // setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. + // Buckets that use this setting only accept PUT requests that don't specify + // an ACL or PUT requests that specify bucket owner full control ACLs, such + // as the bucket-owner-full-control canned ACL or an equivalent form of this + // ACL expressed in the XML format. PUT requests that contain other ACLs (for + // example, custom grants to certain Amazon Web Services accounts) fail and + // return a 400 error with the error code AccessControlListNotSupported. For + // more information, see Controlling ownership of objects and disabling ACLs + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // The bucket name to which the PUT action was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for + // object encryption with SSE-KMS. + // + // Specifying this header with a PUT action doesn’t affect bucket-level settings + // for S3 Bucket Key. + // + // This functionality is not supported for directory buckets. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Can be used to specify caching behavior along the request/reply chain. For + // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon + // S3 fails the request with the HTTP status code 400 Bad Request. + // + // For the x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // + // * CRC32 + // + // * CRC32C + // + // * SHA1 + // + // * SHA256 + // + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, + // Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum + // algorithm that matches the provided value in x-amz-checksum-algorithm . + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. 
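+	//
+	// (Editor's illustrative sketch, not upstream text; it assumes the standard
+	// crypto/sha256 and encoding/base64 packages, and input is a *PutObjectInput.)
+	// Choosing SHA256 with this v1 SDK means the caller supplies the matching
+	// digest:
+	//
+	//	sum := sha256.Sum256(payload) // payload holds the request body bytes
+	//	input.ChecksumAlgorithm = aws.String(ChecksumAlgorithmSha256)
+	//	input.ChecksumSHA256 = aws.String(base64.StdEncoding.EncodeToString(sum[:]))
+	//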
+ // + // The AWS SDK for Go v1 does not support automatically computing the request + // payload checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32C checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 160-bit SHA-1 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 256-bit SHA-256 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + + // Specifies presentational information for the object. For more information, + // see https://www.rfc-editor.org/rfc/rfc6266#section-4 (https://www.rfc-editor.org/rfc/rfc6266#section-4). + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding + // (https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding). + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically.
For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length + // (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length). + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // according to RFC 1864. This header can be used as a message integrity check + // to verify that the data is the same data that was originally sent. Although + // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end + // integrity check. For more information about REST request authentication, + // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). + // + // The Content-MD5 header is required for any request to upload an object with + // a retention period configured using Amazon S3 Object Lock. For more information + // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + + // A standard MIME type describing the format of the contents. For more information, + // see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type). + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The date and time at which the object is no longer cacheable. For more information, + // see https://www.rfc-editor.org/rfc/rfc7234#section-5.3 (https://www.rfc-editor.org/rfc/rfc7234#section-5.3). + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Object key for which the PUT action was initiated. 
+ // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Specifies whether a legal hold will be applied to this object. For more information + // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode that you want to apply to this object. + // + // This functionality is not supported for directory buckets. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when you want this object's Object Lock to expire. Must + // be formatted as a timestamp parameter. + // + // This functionality is not supported for directory buckets. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + // + // This functionality is not supported for directory buckets. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. 
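+	//
+	// (Editor's illustrative sketch, not upstream text; it assumes crypto/md5
+	// and encoding/base64, and input is a *PutObjectInput.) One way to populate
+	// this member explicitly from a raw 32-byte customer key:
+	//
+	//	keySum := md5.Sum(rawKey) // rawKey is the raw customer key bytes
+	//	input.SSECustomerKeyMD5 = aws.String(base64.StdEncoding.EncodeToString(keySum[:]))
+	//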
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the Amazon Web Services KMS Encryption Context to use for object + // encryption. The value of this header is a base64-encoded UTF-8 string holding + // JSON with the encryption context key-value pairs. This value is stored as + // object metadata and automatically gets passed on to Amazon Web Services KMS + // for future GetObject or CopyObject operations on this object. This value + // must be explicitly added during CopyObject operations. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectInput's + // String and GoString methods. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, + // this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management + // Service (KMS) symmetric encryption customer managed key that was used for + // the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, + // but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 + // uses the Amazon Web Services managed key (aws/s3) to protect the data. If + // the KMS key does not exist in the same account that's issuing the command, + // you must use the full ARN and not just the ID. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectInput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm that was used when you store this object + // in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // General purpose buckets - You have four mutually exclusive options to protect + // data using server-side encryption in Amazon S3, depending on how you choose + // to manage the encryption keys. Specifically, the encryption key options are + // Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or + // DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with + // server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. + // You can optionally tell Amazon S3 to encrypt data at rest by using server-side + // encryption with other key options. For more information, see Using Server-Side + // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) + // in the Amazon S3 User Guide. + // + // Directory buckets - For directory buckets, only the server-side encryption + // with Amazon S3 managed keys (SSE-S3) (AES256) value is supported. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // By default, Amazon S3 uses the STANDARD Storage Class to store newly created + // objects. The STANDARD storage class provides high durability and high availability. + // Depending on performance needs, you can specify a different Storage Class.
+ // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 User Guide. + // + // * For directory buckets, only the S3 Express One Zone storage class is + // supported to store newly created objects. + // + // * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // (For example, "Key1=Value1") + // + // This functionality is not supported for directory buckets. + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. For information about object + // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html) + // in the Amazon S3 User Guide. + // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see Hosting Websites + // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectInput) SetACL(v string) *PutObjectInput { + s.ACL = &v + return s +} + +// SetBody sets the Body field's value. 
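+//
+// Editor's note (an illustrative sketch only): any io.ReadSeeker works as a
+// body, so a small in-memory upload (placeholder names; the standard bytes
+// package is assumed) can look like:
+//
+//	input := &PutObjectInput{
+//	    Bucket: aws.String("example-bucket"),
+//	    Key:    aws.String("example-key"),
+//	    Body:   bytes.NewReader([]byte("hello, world")),
+//	}
+//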
+func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectInput) SetBucket(v string) *PutObjectInput { + s.Bucket = &v + return s +} + +func (s *PutObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *PutObjectInput) SetBucketKeyEnabled(v bool) *PutObjectInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { + s.CacheControl = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutObjectInput) SetChecksumAlgorithm(v string) *PutObjectInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *PutObjectInput) SetChecksumCRC32(v string) *PutObjectInput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *PutObjectInput) SetChecksumCRC32C(v string) *PutObjectInput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *PutObjectInput) SetChecksumSHA1(v string) *PutObjectInput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *PutObjectInput) SetChecksumSHA256(v string) *PutObjectInput { + s.ChecksumSHA256 = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { + s.ContentLength = &v + return s +} + +// SetContentMD5 sets the ContentMD5 field's value. +func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput { + s.ContentMD5 = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { + s.ContentType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectInput) SetExpectedBucketOwner(v string) *PutObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. 
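+//
+// Editor's note (an illustrative sketch, not vendored SDK text; input is a
+// *PutObjectInput and the grantee value is a placeholder): the grant setters
+// take grantee strings such as a canonical user id or a predefined group:
+//
+//	input.SetGrantWriteACP(`id="canonical-user-id"`)
+//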
+func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectInput) SetKey(v string) *PutObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { + s.Metadata = v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *PutObjectInput) SetObjectLockLegalHoldStatus(v string) *PutObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *PutObjectInput) SetObjectLockMode(v string) *PutObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *PutObjectInput) SetObjectLockRetainUntilDate(v time.Time) *PutObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *PutObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectInput) SetSSEKMSEncryptionContext(v string) *PutObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +func (s *PutObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. 
+// Note that original input is not modified. +func (s PutObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectLegalHoldInput struct { + _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"` + + // The bucket name containing the object that you want to place a legal hold + // on. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatically computing the request + // payload checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure an alternative checksum + // algorithm to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key name for the object that you want to place a legal hold on. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Container element for the legal hold configuration you want to apply to the + // specified object. + LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests.
If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID of the object that you want to place a legal hold on. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectLegalHoldInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectLegalHoldInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectLegalHoldInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectLegalHoldInput) SetBucket(v string) *PutObjectLegalHoldInput { + s.Bucket = &v + return s +} + +func (s *PutObjectLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutObjectLegalHoldInput) SetChecksumAlgorithm(v string) *PutObjectLegalHoldInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectLegalHoldInput) SetExpectedBucketOwner(v string) *PutObjectLegalHoldInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectLegalHoldInput) SetKey(v string) *PutObjectLegalHoldInput { + s.Key = &v + return s +} + +// SetLegalHold sets the LegalHold field's value. +func (s *PutObjectLegalHoldInput) SetLegalHold(v *ObjectLockLegalHold) *PutObjectLegalHoldInput { + s.LegalHold = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectLegalHoldInput) SetRequestPayer(v string) *PutObjectLegalHoldInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
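+//
+// Editor's note (an illustrative sketch only; all identifiers are
+// placeholders): a legal hold can target one specific version of an object:
+//
+//	input := &PutObjectLegalHoldInput{
+//	    Bucket:    aws.String("example-bucket"),
+//	    Key:       aws.String("example-key"),
+//	    VersionId: aws.String("example-version-id"),
+//	    LegalHold: &ObjectLockLegalHold{Status: aws.String(ObjectLockLegalHoldStatusOn)},
+//	}
+//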
+func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInput { + s.VersionId = &v + return s +} + +func (s *PutObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectLegalHoldInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectLegalHoldInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectLegalHoldOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectLegalHoldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectLegalHoldOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHoldOutput { + s.RequestCharged = &v + return s +} + +type PutObjectLockConfigurationInput struct { + _ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"` + + // The bucket whose Object Lock configuration you want to create or replace. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. 
+ // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The Object Lock configuration that you want to apply to the specified bucket. + ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // A token to allow Object Lock to be enabled for an existing bucket. + Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectLockConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectLockConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectLockConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectLockConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectLockConfigurationInput) SetBucket(v string) *PutObjectLockConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutObjectLockConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. 
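
As a rough usage sketch (assumed names, not taken from this patch), a default Object Lock retention rule could be applied to a bucket like this:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Apply a 30-day GOVERNANCE-mode default retention rule. The bucket name
	// is a placeholder; the bucket must have been created with Object Lock
	// enabled, or a token must be supplied via SetToken.
	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30),
				},
			},
		},
	})
	if err != nil {
		log.Fatalf("put object lock configuration: %v", err)
	}
}
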
+func (s *PutObjectLockConfigurationInput) SetChecksumAlgorithm(v string) *PutObjectLockConfigurationInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectLockConfigurationInput) SetExpectedBucketOwner(v string) *PutObjectLockConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. +func (s *PutObjectLockConfigurationInput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *PutObjectLockConfigurationInput { + s.ObjectLockConfiguration = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectLockConfigurationInput) SetRequestPayer(v string) *PutObjectLockConfigurationInput { + s.RequestPayer = &v + return s +} + +// SetToken sets the Token field's value. +func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfigurationInput { + s.Token = &v + return s +} + +func (s *PutObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectLockConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectLockConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectLockConfigurationOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectLockConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectLockConfigurationOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectLockConfigurationOutput) SetRequestCharged(v string) *PutObjectLockConfigurationOutput { + s.RequestCharged = &v + return s +} + +type PutObjectOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the uploaded object uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. 
+ BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + + // Entity tag for the uploaded object. + // + // General purpose buckets - To ensure that data is not corrupted traversing + // the network, for objects where the ETag is the MD5 digest of the object, + // you can calculate the MD5 while putting an object to Amazon S3 and compare + // the returned ETag to the calculated MD5 value. + // + // Directory buckets - The ETag for the object in a directory bucket isn't the + // MD5 digest of the object. 
+ ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the expiration is configured for the object (see PutBucketLifecycleConfiguration + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)) + // in the Amazon S3 User Guide, the response includes this header. It includes + // the expiry-date and rule-id key-value pairs that provide information about + // object expiration. The value of the rule-id is URL-encoded. + // + // This functionality is not supported for directory buckets. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide the round-trip message integrity + // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, indicates the Amazon Web Services KMS Encryption Context to use + // for object encryption. The value of this header is a base64-encoded UTF-8 + // string holding JSON with the encryption context key-value pairs. This value + // is stored as object metadata and automatically gets passed on to Amazon Web + // Services KMS for future GetObject or CopyObject operations on this object. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectOutput's + // String and GoString methods. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, + // this header indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. 
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the object. + // + // If you enable versioning for a bucket, Amazon S3 automatically generates + // a unique version ID for the object being stored. Amazon S3 returns this ID + // in the response. When you enable versioning for a bucket, if Amazon S3 receives + // multiple write requests for the same object simultaneously, it stores all + // of the objects. For more information about versioning, see Adding Objects + // to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) + // in the Amazon S3 User Guide. For information about returning the versioning + // state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectOutput) GoString() string { + return s.String() +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *PutObjectOutput) SetBucketKeyEnabled(v bool) *PutObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *PutObjectOutput) SetChecksumCRC32(v string) *PutObjectOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *PutObjectOutput) SetChecksumCRC32C(v string) *PutObjectOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *PutObjectOutput) SetChecksumSHA1(v string) *PutObjectOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *PutObjectOutput) SetChecksumSHA256(v string) *PutObjectOutput { + s.ChecksumSHA256 = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. 
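
PutObjectOutput carries only response metadata, so in application code its fields are typically read rather than set. A short sketch (placeholder bucket and key) of inspecting a few of them after an upload:

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.PutObject(&s3.PutObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("hello.txt"),      // placeholder
		Body:   strings.NewReader("hello, world"),
	})
	if err != nil {
		log.Fatalf("put object: %v", err)
	}

	// aws.StringValue dereferences safely, returning "" for nil pointers.
	fmt.Println("etag:", aws.StringValue(out.ETag))
	fmt.Println("version:", aws.StringValue(out.VersionId)) // empty unless versioning is enabled
}
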
+func (s *PutObjectOutput) SetSSEKMSEncryptionContext(v string) *PutObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { + s.VersionId = &v + return s +} + +type PutObjectRetentionInput struct { + _ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"` + + // The bucket name that contains the object you want to apply this Object Retention + // configuration to. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates whether this action should bypass Governance-mode restrictions. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key name for the object that you want to apply this Object Retention + // configuration to. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The container element for the Object Retention configuration. + Retention *ObjectLockRetention `locationName:"Retention" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The version ID for the object that you want to apply this Object Retention + // configuration to. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectRetentionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectRetentionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectRetentionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectRetentionInput) SetBucket(v string) *PutObjectRetentionInput { + s.Bucket = &v + return s +} + +func (s *PutObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *PutObjectRetentionInput) SetBypassGovernanceRetention(v bool) *PutObjectRetentionInput { + s.BypassGovernanceRetention = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. 
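
A sketch (assumed bucket and key values) of applying a COMPLIANCE-mode retention date with this input type:

package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Retain the object for 30 days from now. COMPLIANCE mode cannot be
	// shortened or removed once set, so GOVERNANCE is safer for testing.
	_, err := svc.PutObjectRetention(&s3.PutObjectRetentionInput{
		Bucket: aws.String("example-bucket"),       // placeholder
		Key:    aws.String("invoices/2024-10.csv"), // placeholder
		Retention: &s3.ObjectLockRetention{
			Mode:            aws.String(s3.ObjectLockRetentionModeCompliance),
			RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
		},
	})
	if err != nil {
		log.Fatalf("put object retention: %v", err)
	}
}
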
+func (s *PutObjectRetentionInput) SetChecksumAlgorithm(v string) *PutObjectRetentionInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectRetentionInput) SetExpectedBucketOwner(v string) *PutObjectRetentionInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectRetentionInput) SetKey(v string) *PutObjectRetentionInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectRetentionInput) SetRequestPayer(v string) *PutObjectRetentionInput { + s.RequestPayer = &v + return s +} + +// SetRetention sets the Retention field's value. +func (s *PutObjectRetentionInput) SetRetention(v *ObjectLockRetention) *PutObjectRetentionInput { + s.Retention = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInput { + s.VersionId = &v + return s +} + +func (s *PutObjectRetentionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectRetentionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectRetentionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectRetentionOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectRetentionOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetentionOutput { + s.RequestCharged = &v + return s +} + +type PutObjectTaggingInput struct { + _ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"` + + // The bucket name containing the object. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. 
When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Name of the object key. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. 
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Container for the TagSet and Tag elements + // + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The versionId of the object that the tag-set will be added to. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *PutObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutObjectTaggingInput) SetChecksumAlgorithm(v string) *PutObjectTaggingInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectTaggingInput) SetExpectedBucketOwner(v string) *PutObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectTaggingInput) SetRequestPayer(v string) *PutObjectTaggingInput { + s.RequestPayer = &v + return s +} + +// SetTagging sets the Tagging field's value. 
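
A brief sketch (placeholder names) of replacing an object's tag-set through this input type; note that PutObjectTagging overwrites any existing tags rather than merging with them:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket: aws.String("example-bucket"),  // placeholder
		Key:    aws.String("images/logo.png"), // placeholder
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("project"), Value: aws.String("vfs-cache")},
			},
		},
	})
	if err != nil {
		log.Fatalf("put object tagging: %v", err)
	}
	// The output echoes the version the tag-set was applied to.
	fmt.Println("tagged version:", aws.StringValue(out.VersionId))
}
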
+func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { + s.Tagging = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { + s.VersionId = &v + return s +} + +func (s *PutObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // The versionId of the object the tag-set was added to. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput { + s.VersionId = &v + return s +} + +type PutPublicAccessBlockInput struct { + _ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. 
+ // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The PublicAccessBlock configuration that you want to apply to this Amazon + // S3 bucket. You can enable the configuration options in any combination. For + // more information about when Amazon S3 considers a bucket or object public, + // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) + // in the Amazon S3 User Guide. + // + // PublicAccessBlockConfiguration is a required field + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutPublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutPublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutPublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.PublicAccessBlockConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("PublicAccessBlockConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutPublicAccessBlockInput) SetBucket(v string) *PutPublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *PutPublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutPublicAccessBlockInput) SetChecksumAlgorithm(v string) *PutPublicAccessBlockInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutPublicAccessBlockInput) SetExpectedBucketOwner(v string) *PutPublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. 
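
A minimal sketch (placeholder bucket) of applying the strictest PublicAccessBlock settings through this input type:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Turn on all four restrictions; any combination is also valid, as the
	// PublicAccessBlockConfiguration documentation above notes.
	_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
		Bucket: aws.String("example-bucket"), // placeholder
		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatalf("put public access block: %v", err)
	}
}
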
+func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *PutPublicAccessBlockInput { + s.PublicAccessBlockConfiguration = v + return s +} + +func (s *PutPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutPublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutPublicAccessBlockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutPublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutPublicAccessBlockOutput) GoString() string { + return s.String() +} + +// Specifies the configuration for publishing messages to an Amazon Simple Queue +// Service (Amazon SQS) queue when Amazon S3 detects specified events. +type QueueConfiguration struct { + _ struct{} `type:"structure"` + + // A collection of bucket events for which to send notifications + // + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring event notifications using object key name filtering + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) + // in the Amazon S3 User Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. + // + // QueueArn is a required field + QueueArn *string `locationName:"Queue" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueueConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueueConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *QueueConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.QueueArn == nil { + invalidParams.Add(request.NewErrParamRequired("QueueArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *QueueConfiguration) SetId(v string) *QueueConfiguration { + s.Id = &v + return s +} + +// SetQueueArn sets the QueueArn field's value. +func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { + s.QueueArn = &v + return s +} + +// This data type is deprecated. Use QueueConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_QueueConfiguration.html) +// for the same purposes. This data type specifies the configuration for publishing +// messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon +// S3 detects specified events. +type QueueConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // The bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + // A collection of bucket events for which to send notifications. + Events []*string `locationName:"Event" type:"list" flattened:"true" enum:"Event"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. + Queue *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueueConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueueConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetEvent sets the Event field's value. +func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated { + s.Id = &v + return s +} + +// SetQueue sets the Queue field's value. 
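
QueueConfiguration is consumed through the bucket notification API rather than on its own; a sketch (placeholder bucket and SQS ARN) of wiring object-created events to a queue:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// The queue's access policy must already allow s3.amazonaws.com to send
	// messages, or S3 rejects the configuration.
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		NotificationConfiguration: &s3.NotificationConfiguration{
			QueueConfigurations: []*s3.QueueConfiguration{
				{
					QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"), // placeholder
					Events:   []*string{aws.String(s3.EventS3ObjectCreated)},
				},
			},
		},
	})
	if err != nil {
		log.Fatalf("put bucket notification configuration: %v", err)
	}
}
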
+func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated {
+	s.Queue = &v
+	return s
+}
+
+// The container for the records event.
+type RecordsEvent struct {
+	_ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"`
+
+	// The byte array of one or more, possibly partial, result records.
+	// Payload is automatically base64 encoded/decoded by the SDK.
+	Payload []byte `type:"blob"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RecordsEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RecordsEvent) GoString() string {
+	return s.String()
+}
+
+// SetPayload sets the Payload field's value.
+func (s *RecordsEvent) SetPayload(v []byte) *RecordsEvent {
+	s.Payload = v
+	return s
+}
+
+// The RecordsEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *RecordsEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the RecordsEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *RecordsEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	s.Payload = make([]byte, len(msg.Payload))
+	copy(s.Payload, msg.Payload)
+	return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *RecordsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+	msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+	msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream"))
+	msg.Payload = s.Payload
+	return msg, err
+}
+
+// Specifies how requests are redirected. In the event of an error, you can
+// specify a different error code to return.
+type Redirect struct {
+	_ struct{} `type:"structure"`
+
+	// The host name to use in the redirect request.
+	HostName *string `type:"string"`
+
+	// The HTTP redirect code to use on the response. Not required if one of the
+	// siblings is present.
+	HttpRedirectCode *string `type:"string"`
+
+	// Protocol to use when redirecting requests. The default is the protocol that
+	// is used in the original request.
+	Protocol *string `type:"string" enum:"Protocol"`
+
+	// The object key prefix to use in the redirect request. For example, to redirect
+	// requests for all pages with prefix docs/ (objects in the docs/ folder) to
+	// documents/, you can set a condition block with KeyPrefixEquals set to docs/
+	// and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
+	// if one of the siblings is present. Can be present only if ReplaceKeyWith
+	// is not provided.
+	//
+	// Replacement must be made for object keys containing special characters (such
+	// as carriage returns) when using XML requests.
For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + ReplaceKeyPrefixWith *string `type:"string"` + + // The specific object key to use in the redirect request. For example, redirect + // request to error.html. Not required if one of the siblings is present. Can + // be present only if ReplaceKeyPrefixWith is not provided. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + ReplaceKeyWith *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Redirect) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Redirect) GoString() string { + return s.String() +} + +// SetHostName sets the HostName field's value. +func (s *Redirect) SetHostName(v string) *Redirect { + s.HostName = &v + return s +} + +// SetHttpRedirectCode sets the HttpRedirectCode field's value. +func (s *Redirect) SetHttpRedirectCode(v string) *Redirect { + s.HttpRedirectCode = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *Redirect) SetProtocol(v string) *Redirect { + s.Protocol = &v + return s +} + +// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value. +func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect { + s.ReplaceKeyPrefixWith = &v + return s +} + +// SetReplaceKeyWith sets the ReplaceKeyWith field's value. +func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { + s.ReplaceKeyWith = &v + return s +} + +// Specifies the redirect behavior of all requests to a website endpoint of +// an Amazon S3 bucket. +type RedirectAllRequestsTo struct { + _ struct{} `type:"structure"` + + // Name of the host where requests are redirected. + // + // HostName is a required field + HostName *string `type:"string" required:"true"` + + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RedirectAllRequestsTo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RedirectAllRequestsTo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
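
Both redirect types above are used through the website configuration API; a sketch (placeholder bucket) that mirrors the docs/-to-documents/ example from the Redirect comments:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Serve index.html by default and rewrite keys under docs/ to documents/.
	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
		Bucket: aws.String("example-bucket"), // placeholder
		WebsiteConfiguration: &s3.WebsiteConfiguration{
			IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
			RoutingRules: []*s3.RoutingRule{
				{
					Condition: &s3.Condition{KeyPrefixEquals: aws.String("docs/")},
					Redirect:  &s3.Redirect{ReplaceKeyPrefixWith: aws.String("documents/")},
				},
			},
		},
	})
	if err != nil {
		log.Fatalf("put bucket website: %v", err)
	}
}
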
+ +// Specifies the redirect behavior of all requests to a website endpoint of +// an Amazon S3 bucket. +type RedirectAllRequestsTo struct { + _ struct{} `type:"structure"` + + // Name of the host where requests are redirected. + // + // HostName is a required field + HostName *string `type:"string" required:"true"` + + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RedirectAllRequestsTo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RedirectAllRequestsTo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedirectAllRequestsTo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} + if s.HostName == nil { + invalidParams.Add(request.NewErrParamRequired("HostName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHostName sets the HostName field's value. +func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo { + s.HostName = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { + s.Protocol = &v + return s +} + +// A filter that you can specify to select modifications on replicas. +// Amazon S3 doesn't replicate replica modifications by default. In the latest +// version of replication configuration (when Filter is specified), you can +// specify this element and set the status to Enabled to replicate modifications +// on replicas. +// +// If you don't specify the Filter element, Amazon S3 assumes that the replication +// configuration is the earlier version, V1. In the earlier version, this element +// is not allowed. +type ReplicaModifications struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 replicates modifications on replicas. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicaModificationsStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaModifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicaModifications) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicaModifications) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicaModifications"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ReplicaModifications) SetStatus(v string) *ReplicaModifications { + s.Status = &v + return s +} + +// A container for replication rules. You can add up to 1,000 rules. The maximum +// size of a replication configuration is 2 MB. +type ReplicationConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) + // role that Amazon S3 assumes when replicating objects. For more information, + // see How to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) + // in the Amazon S3 User Guide. + // + // Role is a required field + Role *string `type:"string" required:"true"` + + // A container for one or more replication rules. A replication configuration + // must have at least one rule and can contain a maximum of 1,000 rules. + // + // Rules is a required field + Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRole sets the Role field's value. +func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration { + s.Role = &v + return s +} + +// SetRules sets the Rules field's value. +func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration { + s.Rules = v + return s +} + +// Specifies which Amazon S3 objects to replicate and where to store the replicas. +type ReplicationRule struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 replicates delete markers. If you specify a Filter + // in your replication configuration, you must also include a DeleteMarkerReplication + // element. If your Filter includes a Tag element, the DeleteMarkerReplication + // Status must be set to Disabled, because Amazon S3 does not support replicating + // delete markers for tag-based rules. For an example configuration, see Basic + // Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). + // + // For more information about delete marker replication, see Basic Rule Configuration + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). + // + // If you are using an earlier version of the replication configuration, Amazon + // S3 handles replication of delete markers differently. For more information, + // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). + DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"` + + // A container for information about the replication destination and its configurations + // including enabling the S3 Replication Time Control (S3 RTC). + // + // Destination is a required field + Destination *Destination `type:"structure" required:"true"` + + // Optional configuration to replicate existing source bucket objects. For more + // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) + // in the Amazon S3 User Guide. 
+ ExistingObjectReplication *ExistingObjectReplication `type:"structure"` + + // A filter that identifies the subset of objects to which the replication rule + // applies. A Filter must specify exactly one Prefix, Tag, or an And child element. + Filter *ReplicationRuleFilter `type:"structure"` + + // A unique identifier for the rule. The maximum value is 255 characters. + ID *string `type:"string"` + + // An object key name prefix that identifies the object or objects to which + // the rule applies. The maximum prefix length is 1,024 characters. To include + // all objects in a bucket, specify an empty string. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Deprecated: Prefix has been deprecated + Prefix *string `deprecated:"true" type:"string"` + + // The priority indicates which rule has precedence whenever two or more replication + // rules conflict. Amazon S3 will attempt to replicate objects according to + // all replication rules. However, if there are two or more rules with the same + // destination bucket, then objects will be replicated according to the rule + // with the highest priority. The higher the number, the higher the priority. + // + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) + // in the Amazon S3 User Guide. + Priority *int64 `type:"integer"` + + // A container that describes additional filters for identifying the source + // objects that you want to replicate. You can choose to enable or disable the + // replication of these objects. Currently, Amazon S3 supports only the filter + // that you can specify for objects created with server-side encryption using + // a customer managed key stored in Amazon Web Services Key Management Service + // (SSE-KMS). + SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` + + // Specifies whether the rule is enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ReplicationRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + if s.ExistingObjectReplication != nil { + if err := s.ExistingObjectReplication.Validate(); err != nil { + invalidParams.AddNested("ExistingObjectReplication", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.SourceSelectionCriteria != nil { + if err := s.SourceSelectionCriteria.Validate(); err != nil { + invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeleteMarkerReplication sets the DeleteMarkerReplication field's value. +func (s *ReplicationRule) SetDeleteMarkerReplication(v *DeleteMarkerReplication) *ReplicationRule { + s.DeleteMarkerReplication = v + return s +} + +// SetDestination sets the Destination field's value. +func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { + s.Destination = v + return s +} + +// SetExistingObjectReplication sets the ExistingObjectReplication field's value. +func (s *ReplicationRule) SetExistingObjectReplication(v *ExistingObjectReplication) *ReplicationRule { + s.ExistingObjectReplication = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule { + s.Filter = v + return s +} + +// SetID sets the ID field's value. +func (s *ReplicationRule) SetID(v string) *ReplicationRule { + s.ID = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { + s.Prefix = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *ReplicationRule) SetPriority(v int64) *ReplicationRule { + s.Priority = &v + return s +} + +// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value. +func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule { + s.SourceSelectionCriteria = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { + s.Status = &v + return s +} + +// A container for specifying rule filters. The filters determine the subset +// of objects to which the rule applies. This element is required only if you +// specify more than one filter. +// +// For example: +// +// - If you specify both a Prefix and a Tag filter, wrap these filters in +// an And tag. +// +// - If you specify a filter based on multiple tags, wrap the Tag elements +// in an And tag. +type ReplicationRuleAndOperator struct { + _ struct{} `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + Prefix *string `type:"string"` + + // An array of tags containing key and value pairs. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRuleAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRuleAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleAndOperator) SetPrefix(v string) *ReplicationRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ReplicationRuleAndOperator) SetTags(v []*Tag) *ReplicationRuleAndOperator { + s.Tags = v + return s +} + +// A filter that identifies the subset of objects to which the replication rule +// applies. A Filter must specify exactly one Prefix, Tag, or an And child element. +type ReplicationRuleFilter struct { + _ struct{} `type:"structure"` + + // A container for specifying rule filters. The filters determine the subset + // of objects to which the rule applies. This element is required only if you + // specify more than one filter. For example: + // + // * If you specify both a Prefix and a Tag filter, wrap these filters in + // an And tag. + // + // * If you specify a filter based on multiple tags, wrap the Tag elements + // in an And tag. + And *ReplicationRuleAndOperator `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + Prefix *string `type:"string"` + + // A container for specifying a tag key and value. + // + // The rule applies only to objects that have the tag in their tag set. + Tag *Tag `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRuleFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ReplicationRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *ReplicationRuleFilter) SetAnd(v *ReplicationRuleAndOperator) *ReplicationRuleFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleFilter) SetPrefix(v string) *ReplicationRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter { + s.Tag = v + return s +} + +// A container specifying S3 Replication Time Control (S3 RTC) related information, +// including whether S3 RTC is enabled and the time when all objects and operations +// on objects must be replicated. Must be specified together with a Metrics +// block. +type ReplicationTime struct { + _ struct{} `type:"structure"` + + // Specifies whether the replication time is enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationTimeStatus"` + + // A container specifying the time by which replication should be complete for + // all objects and operations on objects. + // + // Time is a required field + Time *ReplicationTimeValue `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationTime) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationTime) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationTime) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationTime"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Time == nil { + invalidParams.Add(request.NewErrParamRequired("Time")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ReplicationTime) SetStatus(v string) *ReplicationTime { + s.Status = &v + return s +} + +// SetTime sets the Time field's value. +func (s *ReplicationTime) SetTime(v *ReplicationTimeValue) *ReplicationTime { + s.Time = v + return s +} + +// A container specifying the time value for S3 Replication Time Control (S3 +// RTC) and replication metrics EventThreshold. +type ReplicationTimeValue struct { + _ struct{} `type:"structure"` + + // Contains an integer specifying time in minutes. + // + // Valid value: 15 + Minutes *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationTimeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationTimeValue) GoString() string { + return s.String() +} + +// SetMinutes sets the Minutes field's value. +func (s *ReplicationTimeValue) SetMinutes(v int64) *ReplicationTimeValue { + s.Minutes = &v + return s +} + +// Container for Payer. +type RequestPaymentConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + // + // Payer is a required field + Payer *string `type:"string" required:"true" enum:"Payer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RequestPaymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RequestPaymentConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestPaymentConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} + if s.Payer == nil { + invalidParams.Add(request.NewErrParamRequired("Payer")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPayer sets the Payer field's value. +func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration { + s.Payer = &v + return s +} + +// Container for specifying if periodic QueryProgress messages should be sent. +type RequestProgress struct { + _ struct{} `type:"structure"` + + // Specifies whether periodic QueryProgress frames should be sent. Valid values: + // TRUE, FALSE. Default value: FALSE. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RequestProgress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RequestProgress) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { + s.Enabled = &v + return s +} + +type RestoreObjectInput struct { + _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"` + + // The bucket name containing the object to restore. 
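+ // + // (Editor's note: a hedged sketch of a minimal restore call built from this + // input type, shown here for orientation; the client value svc and the + // bucket, key, day, and tier values are illustrative only.) + // + // _, err := svc.RestoreObject(&s3.RestoreObjectInput{ + // Bucket: aws.String("example-bucket"), + // Key: aws.String("archive/data.bin"), + // RestoreRequest: &s3.RestoreRequest{ + // Days: aws.Int64(2), + // GlacierJobParameters: &s3.GlacierJobParameters{Tier: aws.String(s3.TierStandard)}, + // }, + // })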
+ // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatically computing the request + // payload checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which the action was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. 
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Container for restore job parameters. + RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RestoreRequest != nil { + if err := s.RestoreRequest.Validate(); err != nil { + invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput { + s.Bucket = &v + return s +} + +func (s *RestoreObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *RestoreObjectInput) SetChecksumAlgorithm(v string) *RestoreObjectInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *RestoreObjectInput) SetExpectedBucketOwner(v string) *RestoreObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput { + s.RequestPayer = &v + return s +} + +// SetRestoreRequest sets the RestoreRequest field's value. +func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { + s.RestoreRequest = v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { + s.VersionId = &v + return s +} + +func (s *RestoreObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *RestoreObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from the ARN into the input member. +// It returns a pointer to a modified copy of the input and an error. +// Note that the original input is not modified. +func (s RestoreObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type RestoreObjectOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Indicates the path in the provided S3 output location where Select results + // will be restored to. + RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreObjectOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestoreOutputPath sets the RestoreOutputPath field's value. +func (s *RestoreObjectOutput) SetRestoreOutputPath(v string) *RestoreObjectOutput { + s.RestoreOutputPath = &v + return s +} + +// Container for restore job parameters. +type RestoreRequest struct { + _ struct{} `type:"structure"` + + // Lifetime of the active copy in days. Do not use with restores that specify + // OutputLocation. + // + // The Days element is required for regular restores, and must not be provided + // for select requests. + Days *int64 `type:"integer"` + + // The optional description for the job. + Description *string `type:"string"` + + // S3 Glacier related parameters pertaining to this job. Do not use with restores + // that specify OutputLocation. + GlacierJobParameters *GlacierJobParameters `type:"structure"` + + // Describes the location where the restore job's output is stored. + OutputLocation *OutputLocation `type:"structure"` + + // Describes the parameters for Select job types. + SelectParameters *SelectParameters `type:"structure"` + + // Retrieval tier at which the restore will be processed. + Tier *string `type:"string" enum:"Tier"` + + // Type of restore request. 
+ Type *string `type:"string" enum:"RestoreRequestType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} + if s.GlacierJobParameters != nil { + if err := s.GlacierJobParameters.Validate(); err != nil { + invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) + } + } + if s.OutputLocation != nil { + if err := s.OutputLocation.Validate(); err != nil { + invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams)) + } + } + if s.SelectParameters != nil { + if err := s.SelectParameters.Validate(); err != nil { + invalidParams.AddNested("SelectParameters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDays sets the Days field's value. +func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { + s.Days = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *RestoreRequest) SetDescription(v string) *RestoreRequest { + s.Description = &v + return s +} + +// SetGlacierJobParameters sets the GlacierJobParameters field's value. +func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { + s.GlacierJobParameters = v + return s +} + +// SetOutputLocation sets the OutputLocation field's value. +func (s *RestoreRequest) SetOutputLocation(v *OutputLocation) *RestoreRequest { + s.OutputLocation = v + return s +} + +// SetSelectParameters sets the SelectParameters field's value. +func (s *RestoreRequest) SetSelectParameters(v *SelectParameters) *RestoreRequest { + s.SelectParameters = v + return s +} + +// SetTier sets the Tier field's value. +func (s *RestoreRequest) SetTier(v string) *RestoreRequest { + s.Tier = &v + return s +} + +// SetType sets the Type field's value. +func (s *RestoreRequest) SetType(v string) *RestoreRequest { + s.Type = &v + return s +} + +// Specifies the restoration status of an object. Objects in certain storage +// classes must be restored before they can be retrieved. For more information +// about these storage classes and how to work with archived objects, see Working +// with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) +// in the Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. Only the S3 Express +// One Zone storage class is supported by directory buckets to store objects. +type RestoreStatus struct { + _ struct{} `type:"structure"` + + // Specifies whether the object is currently being restored. If the object restoration + // is in progress, the header returns the value TRUE. 
For example: + // + // x-amz-optional-object-attributes: IsRestoreInProgress="true" + // + // If the object restoration has completed, the header returns the value FALSE. + // For example: + // + // x-amz-optional-object-attributes: IsRestoreInProgress="false", RestoreExpiryDate="2012-12-21T00:00:00.000Z" + // + // If the object hasn't been restored, there is no header response. + IsRestoreInProgress *bool `type:"boolean"` + + // Indicates when the restored copy will expire. This value is populated only + // if the object has already been restored. For example: + // + // x-amz-optional-object-attributes: IsRestoreInProgress="false", RestoreExpiryDate="2012-12-21T00:00:00.000Z" + RestoreExpiryDate *time.Time `type:"timestamp"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreStatus) GoString() string { + return s.String() +} + +// SetIsRestoreInProgress sets the IsRestoreInProgress field's value. +func (s *RestoreStatus) SetIsRestoreInProgress(v bool) *RestoreStatus { + s.IsRestoreInProgress = &v + return s +} + +// SetRestoreExpiryDate sets the RestoreExpiryDate field's value. +func (s *RestoreStatus) SetRestoreExpiryDate(v time.Time) *RestoreStatus { + s.RestoreExpiryDate = &v + return s +} + +// Specifies the redirect behavior and when a redirect is applied. For more +// information about routing rules, see Configuring advanced conditional redirects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) +// in the Amazon S3 User Guide. +type RoutingRule struct { + _ struct{} `type:"structure"` + + // A container for describing a condition that must be met for the specified + // redirect to apply. For example, 1. If request is for pages in the /docs folder, + // redirect to the /documents folder. 2. If request results in HTTP error 4xx, + // redirect request to another host where you might process the error. + Condition *Condition `type:"structure"` + + // Container for redirect information. You can redirect requests to another + // host, to another page, or with another protocol. In the event of an error, + // you can specify a different error code to return. + // + // Redirect is a required field + Redirect *Redirect `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoutingRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoutingRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RoutingRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RoutingRule"} + if s.Redirect == nil { + invalidParams.Add(request.NewErrParamRequired("Redirect")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCondition sets the Condition field's value. +func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule { + s.Condition = v + return s +} + +// SetRedirect sets the Redirect field's value. +func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { + s.Redirect = v + return s +} + +// Specifies lifecycle rules for an Amazon S3 bucket. For more information, +// see Put Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) +// in the Amazon S3 API Reference. For examples, see Put Bucket Lifecycle Configuration +// Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples). +type Rule struct { + _ struct{} `type:"structure"` + + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 will wait before permanently removing all parts of the upload. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon S3 User Guide. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + + // Specifies the expiration for the lifecycle of the object. + Expiration *LifecycleExpiration `type:"structure"` + + // Unique identifier for the rule. The value can't be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Container for the transition rule that describes when noncurrent objects + // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, + // GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled + // (or versioning is suspended), you can set this action to request that Amazon + // S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, + // INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at + // a specific period in the object's lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + + // Object key prefix that identifies one or more objects to which this rule + // applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // If Enabled, the rule is currently being applied. If Disabled, the rule is + // not currently being applied. 
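+ // + // (Editor's note: a hedged sketch of a single lifecycle rule built with this + // type, shown here for orientation; the ID, prefix, and day count are + // illustrative only.) + // + // rule := &s3.Rule{ + // ID: aws.String("expire-old-logs"), + // Prefix: aws.String("logs/"), + // Status: aws.String(s3.ExpirationStatusEnabled), + // Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)}, + // }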
+ // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + // Specifies when an object transitions to a specified storage class. For more + // information about Amazon S3 lifecycle configuration rules, see Transitioning + // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) + // in the Amazon S3 User Guide. + Transition *Transition `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Rule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Rule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Rule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule { + s.Expiration = v + return s +} + +// SetID sets the ID field's value. +func (s *Rule) SetID(v string) *Rule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value. +func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule { + s.NoncurrentVersionTransition = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Rule) SetPrefix(v string) *Rule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Rule) SetStatus(v string) *Rule { + s.Status = &v + return s +} + +// SetTransition sets the Transition field's value. +func (s *Rule) SetTransition(v *Transition) *Rule { + s.Transition = v + return s +} + +// Specifies the use of SSE-KMS to encrypt delivered inventory reports. +type SSEKMS struct { + _ struct{} `locationName:"SSE-KMS" type:"structure"` + + // Specifies the ID of the Key Management Service (KMS) symmetric encryption + // customer managed key to use for encrypting inventory reports. + // + // KeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SSEKMS's + // String and GoString methods. + // + // KeyId is a required field + KeyId *string `type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SSEKMS) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SSEKMS) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SSEKMS) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SSEKMS"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKeyId sets the KeyId field's value. +func (s *SSEKMS) SetKeyId(v string) *SSEKMS { + s.KeyId = &v + return s +} + +// Specifies the use of SSE-S3 to encrypt delivered inventory reports. +type SSES3 struct { + _ struct{} `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SSES3) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SSES3) GoString() string { + return s.String() +} + +// Specifies the byte range of the object to get the records from. A record +// is processed when its first byte is contained by the range. This parameter +// is optional, but when specified, it must not be empty. See RFC 2616, Section +// 14.35.1 about how to specify the start and end of the range. +type ScanRange struct { + _ struct{} `type:"structure"` + + // Specifies the end of the byte range. This parameter is optional. Valid values: + // non-negative integers. The default value is one less than the size of the + // object being queried. If only the End parameter is supplied, it is interpreted + // to mean scan the last N bytes of the file. For example, <scanrange><end>50</end></scanrange> + // means scan the last 50 bytes. + End *int64 `type:"long"` + + // Specifies the start of the byte range. This parameter is optional. Valid + // values: non-negative integers. The default value is 0. If only start is supplied, + // it means scan from that point to the end of the file. For example, <scanrange><start>50</start></scanrange> + // means scan from byte 50 until the end of the file. + Start *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ScanRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ScanRange) GoString() string { + return s.String() +} + +// SetEnd sets the End field's value. +func (s *ScanRange) SetEnd(v int64) *ScanRange { + s.End = &v + return s +} + +// SetStart sets the Start field's value. +func (s *ScanRange) SetStart(v int64) *ScanRange { + s.Start = &v + return s +}
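+ +// (Editor's note: hedged illustrations of the range semantics documented on +// the ScanRange struct above; the byte offsets are arbitrary.) +// +// &s3.ScanRange{Start: aws.Int64(50), End: aws.Int64(100)} // bytes 50 through 100, inclusive +// &s3.ScanRange{Start: aws.Int64(50)} // byte 50 to the end of the object +// &s3.ScanRange{End: aws.Int64(50)} // only the last 50 bytes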
+ +// SelectObjectContentEventStreamEvent groups together all EventStream +// event writes for SelectObjectContentEventStream. +// +// These events are: +// +// - ContinuationEvent +// - EndEvent +// - ProgressEvent +// - RecordsEvent +// - StatsEvent +type SelectObjectContentEventStreamEvent interface { + eventSelectObjectContentEventStream() + eventstreamapi.Marshaler + eventstreamapi.Unmarshaler +} + +// SelectObjectContentEventStreamReader provides the interface for reading from the stream. The +// default implementation for this interface will be SelectObjectContentEventStreamData. +// +// The reader's Close method must allow multiple concurrent calls. +// +// These events are: +// +// - ContinuationEvent +// - EndEvent +// - ProgressEvent +// - RecordsEvent +// - StatsEvent +// - SelectObjectContentEventStreamUnknownEvent +type SelectObjectContentEventStreamReader interface { + // Returns a channel of events as they are read from the event stream. + Events() <-chan SelectObjectContentEventStreamEvent + + // Close will stop the reader reading events from the stream. + Close() error + + // Returns any error that has occurred while reading from the event stream. + Err() error +} + +type readSelectObjectContentEventStream struct { + eventReader *eventstreamapi.EventReader + stream chan SelectObjectContentEventStreamEvent + err *eventstreamapi.OnceError + + done chan struct{} + closeOnce sync.Once +} + +func newReadSelectObjectContentEventStream(eventReader *eventstreamapi.EventReader) *readSelectObjectContentEventStream { + r := &readSelectObjectContentEventStream{ + eventReader: eventReader, + stream: make(chan SelectObjectContentEventStreamEvent), + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), + } + go r.readEventStream() + + return r +}
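+ +// (Editor's note: a hedged consumer sketch, not part of the generated code. +// It shows how events surface through the reader's Events() channel on a +// SelectObjectContent response; svc, input, and the sink w are hypothetical.) +// +// out, err := svc.SelectObjectContent(input) +// if err != nil { return err } +// defer out.EventStream.Close() +// for ev := range out.EventStream.Events() { +// if records, ok := ev.(*s3.RecordsEvent); ok { +// w.Write(records.Payload) // partial result records, concatenated by the consumer +// } +// } +// return out.EventStream.Err() // any error that ended the stream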
+ +// Close will close the underlying event stream reader. +func (r *readSelectObjectContentEventStream) Close() error { + r.closeOnce.Do(r.safeClose) + return r.Err() +} + +func (r *readSelectObjectContentEventStream) ErrorSet() <-chan struct{} { + return r.err.ErrorSet() +} + +func (r *readSelectObjectContentEventStream) Closed() <-chan struct{} { + return r.done +} + +func (r *readSelectObjectContentEventStream) safeClose() { + close(r.done) +} + +func (r *readSelectObjectContentEventStream) Err() error { + return r.err.Err() +} + +func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return r.stream +} + +func (r *readSelectObjectContentEventStream) readEventStream() { + defer r.Close() + defer close(r.stream) + + for { + event, err := r.eventReader.ReadEvent() + if err != nil { + if err == io.EOF { + return + } + select { + case <-r.done: + // If closed already ignore the error + return + default: + } + if _, ok := err.(*eventstreamapi.UnknownMessageTypeError); ok { + continue + } + r.err.SetError(err) + return + } + + select { + case r.stream <- event.(SelectObjectContentEventStreamEvent): + case <-r.done: + return + } + } +} + +type unmarshalerForSelectObjectContentEventStreamEvent struct { + metadata protocol.ResponseMetadata +} + +func (u unmarshalerForSelectObjectContentEventStreamEvent) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) { + switch eventType { + case "Cont": + return &ContinuationEvent{}, nil + case "End": + return &EndEvent{}, nil + case "Progress": + return &ProgressEvent{}, nil + case "Records": + return &RecordsEvent{}, nil + case "Stats": + return &StatsEvent{}, nil + default: + return &SelectObjectContentEventStreamUnknownEvent{Type: eventType}, nil + } +} + +// SelectObjectContentEventStreamUnknownEvent provides a failsafe event for the +// SelectObjectContentEventStream group of events when an unknown event is received. +type SelectObjectContentEventStreamUnknownEvent struct { + Type string + Message eventstream.Message +} + +// The SelectObjectContentEventStreamUnknownEvent is an event in the SelectObjectContentEventStream +// group of events. +func (s *SelectObjectContentEventStreamUnknownEvent) eventSelectObjectContentEventStream() {} + +// MarshalEvent marshals the type into a stream event value. This method +// should only be used internally within the SDK's EventStream handling. +func (e *SelectObjectContentEventStreamUnknownEvent) MarshalEvent(pm protocol.PayloadMarshaler) ( + msg eventstream.Message, err error, +) { + return e.Message.Clone(), nil +} + +// UnmarshalEvent unmarshals the EventStream Message into the SelectObjectContentEventStreamUnknownEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (e *SelectObjectContentEventStreamUnknownEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + e.Message = msg.Clone() + return nil +} + +// Request to filter the contents of an Amazon S3 object based on a simple Structured +// Query Language (SQL) statement. In the request, along with the SQL expression, +// you must specify a data serialization format (JSON or CSV) of the object. +// Amazon S3 uses this to parse object data into records. It returns only records +// that match the specified SQL expression. You must also specify the data serialization +// format for the response. For more information, see S3Select API Documentation +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). 
+type SelectObjectContentInput struct { + _ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The S3 bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The expression that is used to query the object. + // + // Expression is a required field + Expression *string `type:"string" required:"true"` + + // The type of the provided expression (for example, SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` + + // Describes the format of the data in the object that is being queried. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` + + // The object key. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Describes the format of the data that you want Amazon S3 to return in response. + // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` + + // Specifies if periodic request progress information should be enabled. + RequestProgress *RequestProgress `type:"structure"` + + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is needed only when the object was created using a checksum algorithm. + // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // The server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SelectObjectContentInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // The MD5 server-side encryption (SSE) customer managed key. This parameter + // is needed only when the object was created using a checksum algorithm. For + // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the byte range of the object to get the records from. A record + // is processed when its first byte is contained by the range. This parameter + // is optional, but when specified, it must not be empty. 
See RFC 2616, Section
+	// 14.35.1 about how to specify the start and end of the range.
+	//
+	// ScanRange may be used in the following ways:
+	//
+	//    * <scanrange><start>50</start><end>100</end></scanrange> - process only
+	//    the records starting between the bytes 50 and 100 (inclusive, counting
+	//    from zero)
+	//
+	//    * <scanrange><start>50</start></scanrange> - process only the records
+	//    starting after the byte 50
+	//
+	//    * <scanrange><end>50</end></scanrange> - process only the records within
+	//    the last 50 bytes of the file.
+	ScanRange *ScanRange `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SelectObjectContentInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SelectObjectContentInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SelectObjectContentInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SelectObjectContentInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.Expression == nil {
+		invalidParams.Add(request.NewErrParamRequired("Expression"))
+	}
+	if s.ExpressionType == nil {
+		invalidParams.Add(request.NewErrParamRequired("ExpressionType"))
+	}
+	if s.InputSerialization == nil {
+		invalidParams.Add(request.NewErrParamRequired("InputSerialization"))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+	if s.OutputSerialization == nil {
+		invalidParams.Add(request.NewErrParamRequired("OutputSerialization"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *SelectObjectContentInput) SetBucket(v string) *SelectObjectContentInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *SelectObjectContentInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *SelectObjectContentInput) SetExpectedBucketOwner(v string) *SelectObjectContentInput {
+	s.ExpectedBucketOwner = &v
+	return s
+}
+
+// SetExpression sets the Expression field's value.
+func (s *SelectObjectContentInput) SetExpression(v string) *SelectObjectContentInput {
+	s.Expression = &v
+	return s
+}
+
+// SetExpressionType sets the ExpressionType field's value.
+func (s *SelectObjectContentInput) SetExpressionType(v string) *SelectObjectContentInput {
+	s.ExpressionType = &v
+	return s
+}
+
+// SetInputSerialization sets the InputSerialization field's value.
+func (s *SelectObjectContentInput) SetInputSerialization(v *InputSerialization) *SelectObjectContentInput {
+	s.InputSerialization = v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *SelectObjectContentInput) SetKey(v string) *SelectObjectContentInput {
+	s.Key = &v
+	return s
+}
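+
+// Illustrative sketch (not generated code) of the three ScanRange forms
+// documented above, written from a caller's perspective; the input variable
+// and byte offsets are placeholders:
+//
+//	both := &s3.ScanRange{Start: aws.Int64(50), End: aws.Int64(100)} // bytes 50-100
+//	from := (&s3.ScanRange{}).SetStart(50)                           // byte 50 to end of object
+//	tail := (&s3.ScanRange{}).SetEnd(50)                             // last 50 bytes
+//	input.SetScanRange(both)
+//	_, _ = from, tail
+
+// SetOutputSerialization sets the OutputSerialization field's value.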
+func (s *SelectObjectContentInput) SetOutputSerialization(v *OutputSerialization) *SelectObjectContentInput { + s.OutputSerialization = v + return s +} + +// SetRequestProgress sets the RequestProgress field's value. +func (s *SelectObjectContentInput) SetRequestProgress(v *RequestProgress) *SelectObjectContentInput { + s.RequestProgress = v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *SelectObjectContentInput) SetSSECustomerAlgorithm(v string) *SelectObjectContentInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *SelectObjectContentInput) SetSSECustomerKey(v string) *SelectObjectContentInput { + s.SSECustomerKey = &v + return s +} + +func (s *SelectObjectContentInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectContentInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetScanRange sets the ScanRange field's value. +func (s *SelectObjectContentInput) SetScanRange(v *ScanRange) *SelectObjectContentInput { + s.ScanRange = v + return s +} + +func (s *SelectObjectContentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *SelectObjectContentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s SelectObjectContentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type SelectObjectContentOutput struct { + _ struct{} `type:"structure" payload:"Payload"` + + EventStream *SelectObjectContentEventStream +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SelectObjectContentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SelectObjectContentOutput) GoString() string { + return s.String() +} + +func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput { + s.EventStream = v + return s +} +func (s *SelectObjectContentOutput) GetEventStream() *SelectObjectContentEventStream { + return s.EventStream +} + +// GetStream returns the type to interact with the event stream. +func (s *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream { + return s.EventStream +} + +// Describes the parameters for Select job types. 
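+
+// Illustrative sketch (not generated code) tying the input and output types
+// above together, assuming a caller's package with a configured
+// *session.Session named sess; the bucket, key, and SQL text are placeholders:
+//
+//	svc := s3.New(sess)
+//	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
+//		Bucket:         aws.String("my-bucket"),
+//		Key:            aws.String("data.csv"),
+//		Expression:     aws.String("SELECT s.name FROM S3Object s"),
+//		ExpressionType: aws.String(s3.ExpressionTypeSql),
+//		InputSerialization: &s3.InputSerialization{
+//			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
+//		},
+//		OutputSerialization: &s3.OutputSerialization{
+//			CSV: &s3.CSVOutput{},
+//		},
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer out.GetStream().Close()
+//	// drain out.GetStream().Events() as sketched earlier
+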
+type SelectParameters struct { + _ struct{} `type:"structure"` + + // The expression that is used to query the object. + // + // Expression is a required field + Expression *string `type:"string" required:"true"` + + // The type of the provided expression (for example, SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` + + // Describes the serialization format of the object. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` + + // Describes how the results of the Select job are serialized. + // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SelectParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SelectParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelectParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectParameters"} + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) + } + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) + } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExpression sets the Expression field's value. +func (s *SelectParameters) SetExpression(v string) *SelectParameters { + s.Expression = &v + return s +} + +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectParameters) SetExpressionType(v string) *SelectParameters { + s.ExpressionType = &v + return s +} + +// SetInputSerialization sets the InputSerialization field's value. +func (s *SelectParameters) SetInputSerialization(v *InputSerialization) *SelectParameters { + s.InputSerialization = v + return s +} + +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *SelectParameters { + s.OutputSerialization = v + return s +} + +// Describes the default server-side encryption to apply to new objects in the +// bucket. If a PUT Object request doesn't specify any server-side encryption, +// this default encryption will be applied. If you don't specify a customer +// managed key at configuration, Amazon S3 automatically creates an Amazon Web +// Services KMS key in your Amazon Web Services account the first time that +// you add an object encrypted with SSE-KMS to a bucket. By default, Amazon +// S3 uses this KMS key for SSE-KMS. 
For more information, see PUT Bucket encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) +// in the Amazon S3 API Reference. +type ServerSideEncryptionByDefault struct { + _ struct{} `type:"structure"` + + // Amazon Web Services Key Management Service (KMS) customer Amazon Web Services + // KMS key ID to use for the default encryption. This parameter is allowed if + // and only if SSEAlgorithm is set to aws:kms or aws:kms:dsse. + // + // You can specify the key ID, key alias, or the Amazon Resource Name (ARN) + // of the KMS key. + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key Alias: alias/alias-name + // + // If you use a key ID, you can run into a LogDestination undeliverable error + // when creating a VPC flow log. + // + // If you are using encryption with cross-account or Amazon Web Services service + // operations you must use a fully qualified KMS key ARN. For more information, + // see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). + // + // Amazon S3 only supports symmetric encryption KMS keys. For more information, + // see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the Amazon Web Services Key Management Service Developer Guide. + // + // KMSMasterKeyID is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ServerSideEncryptionByDefault's + // String and GoString methods. + KMSMasterKeyID *string `type:"string" sensitive:"true"` + + // Server-side encryption algorithm to use for the default encryption. + // + // SSEAlgorithm is a required field + SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerSideEncryptionByDefault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerSideEncryptionByDefault) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionByDefault) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionByDefault"} + if s.SSEAlgorithm == nil { + invalidParams.Add(request.NewErrParamRequired("SSEAlgorithm")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKMSMasterKeyID sets the KMSMasterKeyID field's value. +func (s *ServerSideEncryptionByDefault) SetKMSMasterKeyID(v string) *ServerSideEncryptionByDefault { + s.KMSMasterKeyID = &v + return s +} + +// SetSSEAlgorithm sets the SSEAlgorithm field's value. +func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEncryptionByDefault { + s.SSEAlgorithm = &v + return s +} + +// Specifies the default server-side-encryption configuration. 
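+
+// Illustrative sketch (not generated code) of the three accepted
+// KMSMasterKeyID forms listed above; the identifiers are placeholders:
+//
+//	byID := &s3.ServerSideEncryptionByDefault{
+//		SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
+//		KMSMasterKeyID: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//	}
+//	byARN := &s3.ServerSideEncryptionByDefault{
+//		SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
+//		KMSMasterKeyID: aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
+//	}
+//	byAlias := &s3.ServerSideEncryptionByDefault{
+//		SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
+//		KMSMasterKeyID: aws.String("alias/alias-name"),
+//	}
+//	_, _, _ = byID, byARN, byAlias
+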
+type ServerSideEncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // Container for information about a particular server-side encryption configuration + // rule. + // + // Rules is a required field + Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerSideEncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerSideEncryptionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRule) *ServerSideEncryptionConfiguration { + s.Rules = v + return s +} + +// Specifies the default server-side encryption configuration. +type ServerSideEncryptionRule struct { + _ struct{} `type:"structure"` + + // Specifies the default server-side encryption to apply to new objects in the + // bucket. If a PUT Object request doesn't specify any server-side encryption, + // this default encryption will be applied. + ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` + + // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side + // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects + // are not affected. Setting the BucketKeyEnabled element to true causes Amazon + // S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. + // + // For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) + // in the Amazon S3 User Guide. + BucketKeyEnabled *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerSideEncryptionRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerSideEncryptionRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
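+
+// Illustrative sketch (not generated code) assembling the rule and
+// configuration types above into a PutBucketEncryption call; svc and the
+// bucket name are assumed to exist in the caller's package:
+//
+//	cfg := &s3.ServerSideEncryptionConfiguration{
+//		Rules: []*s3.ServerSideEncryptionRule{{
+//			ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
+//				SSEAlgorithm: aws.String(s3.ServerSideEncryptionAwsKms),
+//			},
+//			BucketKeyEnabled: aws.Bool(true),
+//		}},
+//	}
+//	if err := cfg.Validate(); err != nil { // catches the missing-Rules case
+//		log.Fatal(err)
+//	}
+//	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
+//		Bucket:                            aws.String("my-bucket"),
+//		ServerSideEncryptionConfiguration: cfg,
+//	})
+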
+func (s *ServerSideEncryptionRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionRule"} + if s.ApplyServerSideEncryptionByDefault != nil { + if err := s.ApplyServerSideEncryptionByDefault.Validate(); err != nil { + invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyServerSideEncryptionByDefault sets the ApplyServerSideEncryptionByDefault field's value. +func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *ServerSideEncryptionByDefault) *ServerSideEncryptionRule { + s.ApplyServerSideEncryptionByDefault = v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *ServerSideEncryptionRule) SetBucketKeyEnabled(v bool) *ServerSideEncryptionRule { + s.BucketKeyEnabled = &v + return s +} + +// The established temporary security credentials of the session. +// +// Directory buckets - These session credentials are only supported for the +// authentication and authorization of Zonal endpoint APIs on directory buckets. +type SessionCredentials struct { + _ struct{} `type:"structure"` + + // A unique identifier that's associated with a secret access key. The access + // key ID and the secret access key are used together to sign programmatic Amazon + // Web Services requests cryptographically. + // + // AccessKeyId is a required field + AccessKeyId *string `locationName:"AccessKeyId" type:"string" required:"true"` + + // Temporary security credentials expire after a specified interval. After temporary + // credentials expire, any calls that you make with those credentials will fail. + // So you must generate a new set of temporary credentials. Temporary credentials + // cannot be extended or refreshed beyond the original specified interval. + // + // Expiration is a required field + Expiration *time.Time `locationName:"Expiration" type:"timestamp" required:"true"` + + // A key that's used with the access key ID to cryptographically sign programmatic + // Amazon Web Services requests. Signing a request identifies the sender and + // prevents the request from being altered. + // + // SecretAccessKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SessionCredentials's + // String and GoString methods. + // + // SecretAccessKey is a required field + SecretAccessKey *string `locationName:"SecretAccessKey" type:"string" required:"true" sensitive:"true"` + + // A part of the temporary security credentials. The session token is used to + // validate the temporary security credentials. + // + // SessionToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SessionCredentials's + // String and GoString methods. + // + // SessionToken is a required field + SessionToken *string `locationName:"SessionToken" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SessionCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s SessionCredentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *SessionCredentials) SetAccessKeyId(v string) *SessionCredentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *SessionCredentials) SetExpiration(v time.Time) *SessionCredentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *SessionCredentials) SetSecretAccessKey(v string) *SessionCredentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *SessionCredentials) SetSessionToken(v string) *SessionCredentials { + s.SessionToken = &v + return s +} + +// To use simple format for S3 keys for log objects, set SimplePrefix to an +// empty object. +// +// [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +type SimplePrefix struct { + _ struct{} `locationName:"SimplePrefix" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SimplePrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SimplePrefix) GoString() string { + return s.String() +} + +// A container that describes additional filters for identifying the source +// objects that you want to replicate. You can choose to enable or disable the +// replication of these objects. Currently, Amazon S3 supports only the filter +// that you can specify for objects created with server-side encryption using +// a customer managed key stored in Amazon Web Services Key Management Service +// (SSE-KMS). +type SourceSelectionCriteria struct { + _ struct{} `type:"structure"` + + // A filter that you can specify for selections for modifications on replicas. + // Amazon S3 doesn't replicate replica modifications by default. In the latest + // version of replication configuration (when Filter is specified), you can + // specify this element and set the status to Enabled to replicate modifications + // on replicas. + // + // If you don't specify the Filter element, Amazon S3 assumes that the replication + // configuration is the earlier version, V1. In the earlier version, this element + // is not allowed + ReplicaModifications *ReplicaModifications `type:"structure"` + + // A container for filter information for the selection of Amazon S3 objects + // encrypted with Amazon Web Services KMS. If you include SourceSelectionCriteria + // in the replication configuration, this element is required. + SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SourceSelectionCriteria) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SourceSelectionCriteria) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SourceSelectionCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SourceSelectionCriteria"} + if s.ReplicaModifications != nil { + if err := s.ReplicaModifications.Validate(); err != nil { + invalidParams.AddNested("ReplicaModifications", err.(request.ErrInvalidParams)) + } + } + if s.SseKmsEncryptedObjects != nil { + if err := s.SseKmsEncryptedObjects.Validate(); err != nil { + invalidParams.AddNested("SseKmsEncryptedObjects", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReplicaModifications sets the ReplicaModifications field's value. +func (s *SourceSelectionCriteria) SetReplicaModifications(v *ReplicaModifications) *SourceSelectionCriteria { + s.ReplicaModifications = v + return s +} + +// SetSseKmsEncryptedObjects sets the SseKmsEncryptedObjects field's value. +func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedObjects) *SourceSelectionCriteria { + s.SseKmsEncryptedObjects = v + return s +} + +// A container for filter information for the selection of S3 objects encrypted +// with Amazon Web Services KMS. +type SseKmsEncryptedObjects struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 replicates objects created with server-side encryption + // using an Amazon Web Services KMS key stored in Amazon Web Services Key Management + // Service. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SseKmsEncryptedObjects) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SseKmsEncryptedObjects) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SseKmsEncryptedObjects) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SseKmsEncryptedObjects"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { + s.Status = &v + return s +} + +// Container for the stats details. +type Stats struct { + _ struct{} `type:"structure"` + + // The total number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` + + // The total number of bytes of records payload data returned. + BytesReturned *int64 `type:"long"` + + // The total number of object bytes scanned. 
+	BytesScanned *int64 `type:"long"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Stats) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Stats) GoString() string {
+	return s.String()
+}
+
+// SetBytesProcessed sets the BytesProcessed field's value.
+func (s *Stats) SetBytesProcessed(v int64) *Stats {
+	s.BytesProcessed = &v
+	return s
+}
+
+// SetBytesReturned sets the BytesReturned field's value.
+func (s *Stats) SetBytesReturned(v int64) *Stats {
+	s.BytesReturned = &v
+	return s
+}
+
+// SetBytesScanned sets the BytesScanned field's value.
+func (s *Stats) SetBytesScanned(v int64) *Stats {
+	s.BytesScanned = &v
+	return s
+}
+
+// Container for the Stats Event.
+type StatsEvent struct {
+	_ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"`
+
+	// The Stats event details.
+	Details *Stats `locationName:"Details" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s StatsEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s StatsEvent) GoString() string {
+	return s.String()
+}
+
+// SetDetails sets the Details field's value.
+func (s *StatsEvent) SetDetails(v *Stats) *StatsEvent {
+	s.Details = v
+	return s
+}
+
+// The StatsEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *StatsEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the StatsEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *StatsEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	if err := payloadUnmarshaler.UnmarshalPayload(
+		bytes.NewReader(msg.Payload), s,
+	); err != nil {
+		return err
+	}
+	return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *StatsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+	msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+	var buf bytes.Buffer
+	if err = pm.MarshalPayload(&buf, s); err != nil {
+		return eventstream.Message{}, err
+	}
+	msg.Payload = buf.Bytes()
+	return msg, err
+}
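+
+// Illustrative sketch (not generated code): Stats and StatsEvent surface in
+// the same event loop sketched earlier for SelectObjectContent; a minimal
+// handler clause for that switch might read:
+//
+//	case *s3.StatsEvent:
+//		st := e.Details
+//		log.Printf("scanned=%d processed=%d returned=%d",
+//			aws.Int64Value(st.BytesScanned),
+//			aws.Int64Value(st.BytesProcessed),
+//			aws.Int64Value(st.BytesReturned))
+
+// Specifies data related to access patterns to be collected and made available
+// to analyze the tradeoffs between different storage classes for an Amazon
+// S3 bucket.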
+type StorageClassAnalysis struct { + _ struct{} `type:"structure"` + + // Specifies how data related to the storage class analysis for an Amazon S3 + // bucket should be exported. + DataExport *StorageClassAnalysisDataExport `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StorageClassAnalysis) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StorageClassAnalysis) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageClassAnalysis) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"} + if s.DataExport != nil { + if err := s.DataExport.Validate(); err != nil { + invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataExport sets the DataExport field's value. +func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis { + s.DataExport = v + return s +} + +// Container for data related to the storage class analysis for an Amazon S3 +// bucket for export. +type StorageClassAnalysisDataExport struct { + _ struct{} `type:"structure"` + + // The place to store the data for an analysis. + // + // Destination is a required field + Destination *AnalyticsExportDestination `type:"structure" required:"true"` + + // The version of the output schema to use when exporting data. Must be V_1. + // + // OutputSchemaVersion is a required field + OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StorageClassAnalysisDataExport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StorageClassAnalysisDataExport) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageClassAnalysisDataExport) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.OutputSchemaVersion == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. 
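+
+// Illustrative sketch (not generated code) of a storage-class analysis export,
+// with V_1 as the only valid schema version; the destination bucket ARN is a
+// placeholder:
+//
+//	analysis := &s3.StorageClassAnalysis{
+//		DataExport: &s3.StorageClassAnalysisDataExport{
+//			OutputSchemaVersion: aws.String(s3.StorageClassAnalysisSchemaVersionV1),
+//			Destination: &s3.AnalyticsExportDestination{
+//				S3BucketDestination: &s3.AnalyticsS3BucketDestination{
+//					Bucket: aws.String("arn:aws:s3:::analysis-results"),
+//					Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv),
+//				},
+//			},
+//		},
+//	}
+//	if err := analysis.Validate(); err != nil {
+//		log.Fatal(err)
+//	}
+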
+func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport { + s.Destination = v + return s +} + +// SetOutputSchemaVersion sets the OutputSchemaVersion field's value. +func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport { + s.OutputSchemaVersion = &v + return s +} + +// A container of a key value name pair. +type Tag struct { + _ struct{} `type:"structure"` + + // Name of the object key. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // Value of the tag. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +// Container for TagSet elements. +type Tagging struct { + _ struct{} `type:"structure"` + + // A collection for a set of tags + // + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tagging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tagging) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
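+
+// Illustrative sketch (not generated code) of building a TagSet and validating
+// it; the keys and values are placeholders:
+//
+//	tagging := &s3.Tagging{
+//		TagSet: []*s3.Tag{
+//			{Key: aws.String("team"), Value: aws.String("storage")},
+//			{Key: aws.String("env"), Value: aws.String("prod")},
+//		},
+//	}
+//	if err := tagging.Validate(); err != nil { // enforces required fields and Key length >= 1
+//		log.Fatal(err)
+//	}
+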
+func (s *Tagging) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tagging"} + if s.TagSet == nil { + invalidParams.Add(request.NewErrParamRequired("TagSet")) + } + if s.TagSet != nil { + for i, v := range s.TagSet { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTagSet sets the TagSet field's value. +func (s *Tagging) SetTagSet(v []*Tag) *Tagging { + s.TagSet = v + return s +} + +// Container for granting information. +// +// Buckets that use the bucket owner enforced setting for Object Ownership don't +// support target grants. For more information, see Permissions server access +// log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) +// in the Amazon S3 User Guide. +type TargetGrant struct { + _ struct{} `type:"structure"` + + // Container for the person being granted permissions. + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Logging permissions assigned to the grantee for the bucket. + Permission *string `type:"string" enum:"BucketLogsPermission"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TargetGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TargetGrant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetGrant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetGrant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *TargetGrant) SetPermission(v string) *TargetGrant { + s.Permission = &v + return s +} + +// Amazon S3 key format for log objects. Only one format, PartitionedPrefix +// or SimplePrefix, is allowed. +type TargetObjectKeyFormat struct { + _ struct{} `type:"structure"` + + // Partitioned S3 key for log objects. + PartitionedPrefix *PartitionedPrefix `locationName:"PartitionedPrefix" type:"structure"` + + // To use the simple format for S3 keys for log objects. To specify SimplePrefix + // format, set SimplePrefix to {}. + SimplePrefix *SimplePrefix `locationName:"SimplePrefix" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s TargetObjectKeyFormat) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TargetObjectKeyFormat) GoString() string { + return s.String() +} + +// SetPartitionedPrefix sets the PartitionedPrefix field's value. +func (s *TargetObjectKeyFormat) SetPartitionedPrefix(v *PartitionedPrefix) *TargetObjectKeyFormat { + s.PartitionedPrefix = v + return s +} + +// SetSimplePrefix sets the SimplePrefix field's value. +func (s *TargetObjectKeyFormat) SetSimplePrefix(v *SimplePrefix) *TargetObjectKeyFormat { + s.SimplePrefix = v + return s +} + +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. +type Tiering struct { + _ struct{} `type:"structure"` + + // S3 Intelligent-Tiering access tier. See Storage class for automatically optimizing + // frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) + // for a list of access tiers in the S3 Intelligent-Tiering storage class. + // + // AccessTier is a required field + AccessTier *string `type:"string" required:"true" enum:"IntelligentTieringAccessTier"` + + // The number of consecutive days of no access after which an object will be + // eligible to be transitioned to the corresponding tier. The minimum number + // of days specified for Archive Access tier must be at least 90 days and Deep + // Archive Access tier must be at least 180 days. The maximum can be up to 2 + // years (730 days). + // + // Days is a required field + Days *int64 `type:"integer" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tiering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tiering) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tiering) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tiering"} + if s.AccessTier == nil { + invalidParams.Add(request.NewErrParamRequired("AccessTier")) + } + if s.Days == nil { + invalidParams.Add(request.NewErrParamRequired("Days")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessTier sets the AccessTier field's value. +func (s *Tiering) SetAccessTier(v string) *Tiering { + s.AccessTier = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Tiering) SetDays(v int64) *Tiering { + s.Days = &v + return s +} + +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// detects specified events. 
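+
+// Illustrative sketch (not generated code) of the Tiering constraints noted
+// above (Archive Access at 90 days or more, Deep Archive Access at 180 or
+// more, 730 days maximum):
+//
+//	archive := &s3.Tiering{
+//		AccessTier: aws.String(s3.IntelligentTieringAccessTierArchiveAccess),
+//		Days:       aws.Int64(90),
+//	}
+//	deepArchive := &s3.Tiering{
+//		AccessTier: aws.String(s3.IntelligentTieringAccessTierDeepArchiveAccess),
+//		Days:       aws.Int64(180),
+//	}
+//	_, _ = archive, deepArchive
+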
+type TopicConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket event about which to send notifications. For more information, + // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon S3 User Guide. + // + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring event notifications using object key name filtering + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) + // in the Amazon S3 User Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 + // publishes a message when it detects events of the specified type. + // + // TopicArn is a required field + TopicArn *string `locationName:"Topic" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TopicConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TopicConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TopicConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfiguration) SetId(v string) *TopicConfiguration { + s.Id = &v + return s +} + +// SetTopicArn sets the TopicArn field's value. +func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { + s.TopicArn = &v + return s +} + +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// detects specified events. This data type is deprecated. Use TopicConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_TopicConfiguration.html) +// instead. +type TopicConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. 
+ // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + // A collection of events related to objects + Events []*string `locationName:"Event" type:"list" flattened:"true" enum:"Event"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic to which Amazon S3 will publish a message to report the + // specified events for the bucket. + Topic *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TopicConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TopicConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetEvent sets the Event field's value. +func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated { + s.Id = &v + return s +} + +// SetTopic sets the Topic field's value. +func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated { + s.Topic = &v + return s +} + +// Specifies when an object transitions to a specified storage class. For more +// information about Amazon S3 lifecycle configuration rules, see Transitioning +// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) +// in the Amazon S3 User Guide. +type Transition struct { + _ struct{} `type:"structure"` + + // Indicates when objects are transitioned to the specified storage class. The + // date value must be in ISO 8601 format. The time is always midnight UTC. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the number of days after creation when objects are transitioned + // to the specified storage class. The value must be a positive integer. + Days *int64 `type:"integer"` + + // The storage class to which you want the object to transition. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Transition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Transition) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. 
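+
+// Illustrative sketch (not generated code) of the two transition triggers
+// described above; a given lifecycle rule would normally set exactly one of
+// Date or Days:
+//
+//	byAge := &s3.Transition{
+//		Days:         aws.Int64(30),
+//		StorageClass: aws.String(s3.TransitionStorageClassStandardIa),
+//	}
+//	byDate := &s3.Transition{
+//		Date:         aws.Time(time.Date(2025, time.January, 1, 0, 0, 0, 0, time.UTC)),
+//		StorageClass: aws.String(s3.TransitionStorageClassGlacier),
+//	}
+//	_, _ = byAge, byDate
+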
+func (s *Transition) SetDate(v time.Time) *Transition { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Transition) SetDays(v int64) *Transition { + s.Days = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Transition) SetStorageClass(v string) *Transition { + s.StorageClass = &v + return s +} + +type UploadPartCopyInput struct { + _ struct{} `locationName:"UploadPartCopyRequest" type:"structure"` + + // The bucket name. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies the source object for the copy operation. You specify the value + // in one of two formats, depending on whether you want to access the source + // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): + // + // * For objects not accessed through an access point, specify the name of + // the source bucket and key of the source object, separated by a slash (/). + // For example, to copy the object reports/january.pdf from the bucket awsexamplebucket, + // use awsexamplebucket/reports/january.pdf. The value must be URL-encoded. + // + // * For objects accessed through access points, specify the Amazon Resource + // Name (ARN) of the object as accessed through the access point, in the + // format arn:aws:s3:::accesspoint//object/. 
+ // For example, to copy the object reports/january.pdf through access point
+ // my-access-point owned by account 123456789012 in Region us-west-2, use
+ // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf.
+ // The value must be URL-encoded. Amazon S3 supports copy operations using
+ // access points only when the source and destination buckets are in the
+ // same Amazon Web Services Region. Access points are not supported by directory
+ // buckets. Alternatively, for objects accessed through Amazon S3 on Outposts,
+ // specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>.
+ // For example, to copy the object reports/january.pdf through outpost my-outpost
+ // owned by account 123456789012 in Region us-west-2, use the URL encoding
+ // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
+ // The value must be URL-encoded.
+ //
+ // If your bucket has versioning enabled, you could have multiple versions of
+ // the same object. By default, x-amz-copy-source identifies the current version
+ // of the source object to copy. To copy a specific version of the source object,
+ // append ?versionId=<version-id> to the x-amz-copy-source request
+ // header (for example, x-amz-copy-source: /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
+ //
+ // If the current version is a delete marker and you don't specify a versionId
+ // in the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found
+ // error, because the object does not exist. If you specify versionId in the
+ // x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns
+ // an HTTP 400 Bad Request error, because you are not allowed to specify a delete
+ // marker as a version for the x-amz-copy-source.
+ //
+ // Directory buckets - S3 Versioning isn't enabled or supported for directory
+ // buckets.
+ //
+ // CopySource is a required field
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ //
+ // If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+ // headers are present in the request as follows:
+ //
+ // x-amz-copy-source-if-match condition evaluates to true, and;
+ //
+ // x-amz-copy-source-if-unmodified-since condition evaluates to false;
+ //
+ // Amazon S3 returns 200 OK and copies the data.
+ CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+ // Copies the object if it has been modified since the specified time.
+ //
+ // If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since
+ // headers are present in the request as follows:
+ //
+ // x-amz-copy-source-if-none-match condition evaluates to false, and;
+ //
+ // x-amz-copy-source-if-modified-since condition evaluates to true;
+ //
+ // Amazon S3 returns 412 Precondition Failed response code.
+ CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
+
+ // Copies the object if its entity tag (ETag) is different than the specified
+ // ETag.
+ // + // If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since + // headers are present in the request as follows: + // + // x-amz-copy-source-if-none-match condition evaluates to false, and; + // + // x-amz-copy-source-if-modified-since condition evaluates to true; + // + // Amazon S3 returns 412 Precondition Failed response code. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + // + // If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request as follows: + // + // x-amz-copy-source-if-match condition evaluates to true, and; + // + // x-amz-copy-source-if-unmodified-since condition evaluates to false; + // + // Amazon S3 returns 200 OK and copies the data. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` + + // The range of bytes to copy from the source object. The range value must use + // the form bytes=first-last, where the first and last are the zero-based byte + // offsets to copy. For example, bytes=0-9 indicates that you want to copy the + // first 10 bytes of the source. You can copy a range only if the source object + // is greater than 5 MB. + CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` + + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). + // + // This functionality is not supported when the source object is in a directory + // bucket. + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + // + // This functionality is not supported when the source object is in a directory + // bucket. + // + // CopySourceSSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartCopyInput's + // String and GoString methods. + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + // + // This functionality is not supported when the source object is in a directory + // bucket. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // The account ID of the expected destination bucket owner. If the account ID + // that you provide does not match the actual owner of the destination bucket, + // the request fails with the HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The account ID of the expected source bucket owner. 
If the account ID that + // you provide does not match the actual owner of the source bucket, the request + // fails with the HTTP status code 403 Forbidden (access denied). + ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being copied. This is a positive integer between 1 and + // 10,000. + // + // PartNumber is a required field + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported when the destination bucket is a directory + // bucket. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartCopyInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being copied. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UploadPartCopyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UploadPartCopyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UploadPartCopyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.CopySource == nil {
+ invalidParams.Add(request.NewErrParamRequired("CopySource"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.PartNumber == nil {
+ invalidParams.Add(request.NewErrParamRequired("PartNumber"))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *UploadPartCopyInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetCopySource sets the CopySource field's value.
+func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput {
+ s.CopySource = &v
+ return s
+}
+
+// SetCopySourceIfMatch sets the CopySourceIfMatch field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput {
+ s.CopySourceIfMatch = &v
+ return s
+}
+
+// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput {
+ s.CopySourceIfModifiedSince = &v
+ return s
+}
+
+// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput {
+ s.CopySourceIfNoneMatch = &v
+ return s
+}
+
+// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput {
+ s.CopySourceIfUnmodifiedSince = &v
+ return s
+}
+
+// SetCopySourceRange sets the CopySourceRange field's value.
+func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput {
+ s.CopySourceRange = &v
+ return s
+}
+
+// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value.
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput {
+ s.CopySourceSSECustomerAlgorithm = &v
+ return s
+}
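+
+// Example (editor's sketch, not part of the generated surface): copying the
+// first part of a multipart upload from an existing object with the fluent
+// setters above. The client value svc, the bucket and key names, and the
+// uploadID are illustrative placeholders; uploadID would come from a prior
+// CreateMultipartUpload call.
+//
+//	out, err := svc.UploadPartCopy((&s3.UploadPartCopyInput{}).
+//		SetBucket("destination-bucket").
+//		SetCopySource("source-bucket/reports/january.pdf").
+//		SetKey("reports/january.pdf").
+//		SetPartNumber(1).
+//		SetUploadId(uploadID))
+
+// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value.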
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKey = &v + return s +} + +func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *UploadPartCopyInput) SetExpectedBucketOwner(v string) *UploadPartCopyInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpectedSourceBucketOwner sets the ExpectedSourceBucketOwner field's value. +func (s *UploadPartCopyInput) SetExpectedSourceBucketOwner(v string) *UploadPartCopyInput { + s.ExpectedSourceBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput { + s.SSECustomerKey = &v + return s +} + +func (s *UploadPartCopyInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { + s.UploadId = &v + return s +} + +func (s *UploadPartCopyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *UploadPartCopyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s UploadPartCopyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type UploadPartCopyOutput struct { + _ struct{} `type:"structure" payload:"CopyPartResult"` + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. 
+ BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Container for all response elements. + CopyPartResult *CopyPartResult `type:"structure"` + + // The version of the source object that was copied, if you have enabled versioning + // on the source bucket. + // + // This functionality is not supported when the source object is in a directory + // bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide the round-trip message integrity + // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartCopyOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UploadPartCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UploadPartCopyOutput) GoString() string { + return s.String() +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *UploadPartCopyOutput) SetBucketKeyEnabled(v bool) *UploadPartCopyOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCopyPartResult sets the CopyPartResult field's value. 
+func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput { + s.CopyPartResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput { + s.CopySourceVersionId = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput { + s.ServerSideEncryption = &v + return s +} + +type UploadPartInput struct { + _ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // The name of the bucket to which the multipart upload was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? 
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // This checksum algorithm must be the same for all parts and it must match the
+ // checksum value supplied in the CreateMultipartUpload request.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a value
+ // is specified for this parameter, the matching algorithm's checksum member
+ // must be populated with the algorithm's checksum of the request payload.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies
+ // the base64-encoded, 32-bit CRC32 checksum of the object. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies
+ // the base64-encoded, 32-bit CRC32C checksum of the object. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies
+ // the base64-encoded, 160-bit SHA-1 digest of the object. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies
+ // the base64-encoded, 256-bit SHA-256 digest of the object. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"`
+
+ // Size of the body in bytes. This parameter is useful when the size of the
+ // body cannot be determined automatically.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // The base64-encoded 128-bit MD5 digest of the part data. This parameter is
+ // auto-populated when using the command from the CLI. This parameter is required
+ // if object lock parameters are specified.
+ //
+ // This functionality is not supported for directory buckets.
+ ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of part being uploaded. This is a positive integer between 1
+ // and 10,000.
+ //
+ // PartNumber is a required field
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header. This must be the same encryption key specified in the initiate multipart
+ // upload request.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // SSECustomerKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by UploadPartInput's
+ // String and GoString methods.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Upload ID identifying the multipart upload whose part is being uploaded.
+ //
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UploadPartInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UploadPartInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UploadPartInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.PartNumber == nil {
+ invalidParams.Add(request.NewErrParamRequired("PartNumber"))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBody sets the Body field's value.
+func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput {
+ s.Body = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *UploadPartInput) SetBucket(v string) *UploadPartInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *UploadPartInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value.
+func (s *UploadPartInput) SetChecksumAlgorithm(v string) *UploadPartInput {
+ s.ChecksumAlgorithm = &v
+ return s
+}
+
+// SetChecksumCRC32 sets the ChecksumCRC32 field's value.
+func (s *UploadPartInput) SetChecksumCRC32(v string) *UploadPartInput {
+ s.ChecksumCRC32 = &v
+ return s
+}
+
+// SetChecksumCRC32C sets the ChecksumCRC32C field's value.
+func (s *UploadPartInput) SetChecksumCRC32C(v string) *UploadPartInput {
+ s.ChecksumCRC32C = &v
+ return s
+}
+
+// SetChecksumSHA1 sets the ChecksumSHA1 field's value.
+func (s *UploadPartInput) SetChecksumSHA1(v string) *UploadPartInput {
+ s.ChecksumSHA1 = &v
+ return s
+}
+
+// SetChecksumSHA256 sets the ChecksumSHA256 field's value.
+func (s *UploadPartInput) SetChecksumSHA256(v string) *UploadPartInput {
+ s.ChecksumSHA256 = &v
+ return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput {
+ s.ContentLength = &v
+ return s
+}
+
+// SetContentMD5 sets the ContentMD5 field's value.
+func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput {
+ s.ContentMD5 = &v
+ return s
+}
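+
+// Example (editor's sketch, not part of the generated surface): uploading a
+// single part from an in-memory buffer with the fluent setters above. The
+// client value svc, the bucket and key names, partData, and uploadID are
+// illustrative placeholders; bytes.NewReader satisfies the io.ReadSeeker
+// required by the Body field.
+//
+//	out, err := svc.UploadPart((&s3.UploadPartInput{}).
+//		SetBucket("destination-bucket").
+//		SetKey("reports/large-object").
+//		SetPartNumber(1).
+//		SetUploadId(uploadID).
+//		SetBody(bytes.NewReader(partData)))
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.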
+func (s *UploadPartInput) SetExpectedBucketOwner(v string) *UploadPartInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartInput) SetKey(v string) *UploadPartInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { + s.SSECustomerKey = &v + return s +} + +func (s *UploadPartInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { + s.UploadId = &v + return s +} + +func (s *UploadPartInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *UploadPartInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s UploadPartInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type UploadPartOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. 
When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide the round-trip message integrity + // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. 
+ //
+ // SSEKMSKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by UploadPartOutput's
+ // String and GoString methods.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when you store this object in Amazon
+ // S3 (for example, AES256, aws:kms).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed
+ // keys (SSE-S3) (AES256) is supported.
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UploadPartOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UploadPartOutput) GoString() string {
+ return s.String()
+}
+
+// SetBucketKeyEnabled sets the BucketKeyEnabled field's value.
+func (s *UploadPartOutput) SetBucketKeyEnabled(v bool) *UploadPartOutput {
+ s.BucketKeyEnabled = &v
+ return s
+}
+
+// SetChecksumCRC32 sets the ChecksumCRC32 field's value.
+func (s *UploadPartOutput) SetChecksumCRC32(v string) *UploadPartOutput {
+ s.ChecksumCRC32 = &v
+ return s
+}
+
+// SetChecksumCRC32C sets the ChecksumCRC32C field's value.
+func (s *UploadPartOutput) SetChecksumCRC32C(v string) *UploadPartOutput {
+ s.ChecksumCRC32C = &v
+ return s
+}
+
+// SetChecksumSHA1 sets the ChecksumSHA1 field's value.
+func (s *UploadPartOutput) SetChecksumSHA1(v string) *UploadPartOutput {
+ s.ChecksumSHA1 = &v
+ return s
+}
+
+// SetChecksumSHA256 sets the ChecksumSHA256 field's value.
+func (s *UploadPartOutput) SetChecksumSHA256(v string) *UploadPartOutput {
+ s.ChecksumSHA256 = &v
+ return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
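+
+// Example (editor's sketch, not part of the generated surface): recording the
+// uploaded part's ETag for a later CompleteMultipartUpload call. Here out is
+// assumed to be the *UploadPartOutput returned by UploadPart for part 1; the
+// part number is illustrative only.
+//
+//	completed := &s3.CompletedPart{
+//		ETag:       out.ETag,
+//		PartNumber: aws.Int64(1),
+//	}
+
+// Describes the versioning state of an Amazon S3 bucket. For more information,
+// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html)
+// in the Amazon S3 API Reference.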
+type VersioningConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+ // This element is only returned if the bucket has been configured with MFA
+ // delete. If the bucket has never been so configured, this element is not returned.
+ MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"`
+
+ // The versioning state of the bucket.
+ Status *string `type:"string" enum:"BucketVersioningStatus"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VersioningConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s VersioningConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetMFADelete sets the MFADelete field's value.
+func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration {
+ s.MFADelete = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration {
+ s.Status = &v
+ return s
+}
+
+// Specifies website configuration parameters for an Amazon S3 bucket.
+type WebsiteConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the error document for the website.
+ ErrorDocument *ErrorDocument `type:"structure"`
+
+ // The name of the index document for the website.
+ IndexDocument *IndexDocument `type:"structure"`
+
+ // The redirect behavior for every request to this bucket's website endpoint.
+ //
+ // If you specify this property, you can't specify any other property.
+ RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+
+ // Rules that define when a redirect is applied and the redirect behavior.
+ RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s WebsiteConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s WebsiteConfiguration) GoString() string {
+ return s.String()
+}
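+
+// Example (editor's sketch, not part of the generated surface): a minimal
+// static-website configuration with an index document and an error document.
+// The document names are illustrative placeholders.
+//
+//	cfg := &s3.WebsiteConfiguration{
+//		IndexDocument: (&s3.IndexDocument{}).SetSuffix("index.html"),
+//		ErrorDocument: (&s3.ErrorDocument{}).SetKey("error.html"),
+//	}
+
+// Validate inspects the fields of the type to determine if they are valid.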
+func (s *WebsiteConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"}
+ if s.ErrorDocument != nil {
+ if err := s.ErrorDocument.Validate(); err != nil {
+ invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.IndexDocument != nil {
+ if err := s.IndexDocument.Validate(); err != nil {
+ invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.RedirectAllRequestsTo != nil {
+ if err := s.RedirectAllRequestsTo.Validate(); err != nil {
+ invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.RoutingRules != nil {
+ for i, v := range s.RoutingRules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetErrorDocument sets the ErrorDocument field's value.
+func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration {
+ s.ErrorDocument = v
+ return s
+}
+
+// SetIndexDocument sets the IndexDocument field's value.
+func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration {
+ s.IndexDocument = v
+ return s
+}
+
+// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value.
+func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration {
+ s.RedirectAllRequestsTo = v
+ return s
+}
+
+// SetRoutingRules sets the RoutingRules field's value.
+func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration {
+ s.RoutingRules = v
+ return s
+}
+
+type WriteGetObjectResponseInput struct {
+ _ struct{} `locationName:"WriteGetObjectResponseRequest" type:"structure" payload:"Body"`
+
+ // Indicates that a range of bytes was specified.
+ AcceptRanges *string `location:"header" locationName:"x-amz-fwd-header-accept-ranges" type:"string"`
+
+ // The object data.
+ //
+ // To use a non-seekable io.Reader for this request, wrap the io.Reader with
+ // "aws.ReadSeekCloser". The SDK will not retry request errors for non-seekable
+ // readers. This allows the SDK to send the reader's payload as chunked
+ // transfer encoding.
+ Body io.ReadSeeker `type:"blob"`
+
+ // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for
+ // server-side encryption with Amazon Web Services KMS (SSE-KMS).
+ BucketKeyEnabled *bool `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"x-amz-fwd-header-Cache-Control" type:"string"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This specifies the base64-encoded,
+ // 32-bit CRC32 checksum of the object returned by the Object Lambda function.
+ // This may not match the checksum for the object stored in Amazon S3. Amazon
+ // S3 will perform validation of the checksum values only when the original
+ // GetObject request required checksum validation. For more information about
+ // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-crc32" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the base64-encoded, + // 32-bit CRC32C checksum of the object returned by the Object Lambda function. + // This may not match the checksum for the object stored in Amazon S3. Amazon + // S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-crc32c" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the base64-encoded, + // 160-bit SHA-1 digest of the object returned by the Object Lambda function. + // This may not match the checksum for the object stored in Amazon S3. Amazon + // S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-sha1" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the base64-encoded, + // 256-bit SHA-256 digest of the object returned by the Object Lambda function. + // This may not match the checksum for the object stored in Amazon S3. Amazon + // S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-sha256" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"x-amz-fwd-header-Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"x-amz-fwd-header-Content-Encoding" type:"string"` + + // The language the content is in. 
+ ContentLanguage *string `location:"header" locationName:"x-amz-fwd-header-Content-Language" type:"string"`
+
+ // The size of the content body in bytes.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // The portion of the object returned in the response.
+ ContentRange *string `location:"header" locationName:"x-amz-fwd-header-Content-Range" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"x-amz-fwd-header-Content-Type" type:"string"`
+
+ // Specifies whether an object stored in Amazon S3 is (true) or is not (false)
+ // a delete marker.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-fwd-header-x-amz-delete-marker" type:"boolean"`
+
+ // An opaque identifier assigned by a web server to a specific version of a
+ // resource found at a URL.
+ ETag *string `location:"header" locationName:"x-amz-fwd-header-ETag" type:"string"`
+
+ // A string that uniquely identifies an error condition. Returned in the <Code>
+ // tag of the error XML response for a corresponding GetObject call. Cannot
+ // be used with a successful StatusCode header or when the transformed object
+ // is provided in the body. All error codes from S3 are sentence-cased. The
+ // regular expression (regex) value is "^[A-Z][a-zA-Z]+$".
+ ErrorCode *string `location:"header" locationName:"x-amz-fwd-error-code" type:"string"`
+
+ // Contains a generic description of the error condition. Returned in the <Message>
+ // tag of the error XML response for a corresponding GetObject call. Cannot
+ // be used with a successful StatusCode header or when the transformed object
+ // is provided in the body.
+ ErrorMessage *string `location:"header" locationName:"x-amz-fwd-error-message" type:"string"`
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key-value pairs
+ // that provide the object expiration information. The value of the rule-id
+ // is URL-encoded.
+ Expiration *string `location:"header" locationName:"x-amz-fwd-header-x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"x-amz-fwd-header-Expires" type:"timestamp"`
+
+ // The date and time that the object was last modified.
+ LastModified *time.Time `location:"header" locationName:"x-amz-fwd-header-Last-Modified" type:"timestamp"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Set to the number of metadata entries not returned in x-amz-meta headers.
+ // This can happen if you create metadata using an API like SOAP that supports
+ // more flexible metadata than the REST API. For example, using SOAP, you can
+ // create metadata whose values are not legal HTTP headers.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-missing-meta" type:"integer"`
+
+ // Indicates whether an object stored in Amazon S3 has an active legal hold.
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // Indicates whether an object stored in Amazon S3 has Object Lock enabled.
+ // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html).
+	ObjectLockMode *string `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+	// The date and time when Object Lock is configured to expire.
+	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+	// The count of parts this object has.
+	PartsCount *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-mp-parts-count" type:"integer"`
+
+	// Indicates whether the request involves a bucket that is either a source
+	// or a destination in a replication rule. For more information about S3 Replication,
+	// see Replication (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html).
+	ReplicationStatus *string `location:"header" locationName:"x-amz-fwd-header-x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	//
+	// This functionality is not supported for directory buckets.
+	RequestCharged *string `location:"header" locationName:"x-amz-fwd-header-x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+	// Route prefix to the HTTP URL generated.
+	//
+	// RequestRoute is a required field
+	RequestRoute *string `location:"header" locationName:"x-amz-request-route" type:"string" required:"true"`
+
+	// A single-use encrypted token that maps WriteGetObjectResponse to the end
+	// user GetObject request.
+	//
+	// RequestToken is a required field
+	RequestToken *string `location:"header" locationName:"x-amz-request-token" type:"string" required:"true"`
+
+	// Provides information about the object restoration operation and the expiration
+	// time of the restored object copy.
+	Restore *string `location:"header" locationName:"x-amz-fwd-header-x-amz-restore" type:"string"`
+
+	// Encryption algorithm used if server-side encryption with a customer-provided
+	// encryption key was specified for the object stored in Amazon S3.
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// 128-bit MD5 digest of the customer-provided encryption key used in Amazon
+	// S3 to encrypt data stored in S3. For more information, see Protecting data
+	// using server-side encryption with customer-provided encryption keys (SSE-C)
+	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html).
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon
+	// Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption
+	// customer managed key that was used for the object stored in Amazon S3.
+	//
+	// SSEKMSKeyId is a sensitive parameter and its value will be
+	// replaced with "sensitive" in the string returned by WriteGetObjectResponseInput's
+	// String and GoString methods.
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+	// The server-side encryption algorithm used when storing the requested object
+	// in Amazon S3 (for example, AES256, aws:kms).
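+	//
+	// As a usage sketch only (not upstream SDK documentation): an S3 Object
+	// Lambda handler typically sets this header alongside the required route
+	// and token when forwarding a transformed object. Every value below is a
+	// placeholder assumption, and svc is assumed to be an *s3.S3 client.
+	//
+	//	_, err := svc.WriteGetObjectResponse(&s3.WriteGetObjectResponseInput{
+	//		RequestRoute:         aws.String("io-use1-001"),
+	//		RequestToken:         aws.String("example-single-use-token"),
+	//		StatusCode:           aws.Int64(200),
+	//		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAes256),
+	//		Body:                 strings.NewReader("transformed object bytes"),
+	//	})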
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The integer status code for an HTTP response of a corresponding GetObject + // request. The following is a list of status codes. + // + // * 200 - OK + // + // * 206 - Partial Content + // + // * 304 - Not Modified + // + // * 400 - Bad Request + // + // * 401 - Unauthorized + // + // * 403 - Forbidden + // + // * 404 - Not Found + // + // * 405 - Method Not Allowed + // + // * 409 - Conflict + // + // * 411 - Length Required + // + // * 412 - Precondition Failed + // + // * 416 - Range Not Satisfiable + // + // * 500 - Internal Server Error + // + // * 503 - Service Unavailable + StatusCode *int64 `location:"header" locationName:"x-amz-fwd-status" type:"integer"` + + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + StorageClass *string `location:"header" locationName:"x-amz-fwd-header-x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The number of tags, if any, on the object. + TagCount *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-tagging-count" type:"integer"` + + // An ID used to reference a specific version of the object. + VersionId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s WriteGetObjectResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s WriteGetObjectResponseInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *WriteGetObjectResponseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WriteGetObjectResponseInput"} + if s.RequestRoute == nil { + invalidParams.Add(request.NewErrParamRequired("RequestRoute")) + } + if s.RequestRoute != nil && len(*s.RequestRoute) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RequestRoute", 1)) + } + if s.RequestToken == nil { + invalidParams.Add(request.NewErrParamRequired("RequestToken")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *WriteGetObjectResponseInput) SetAcceptRanges(v string) *WriteGetObjectResponseInput { + s.AcceptRanges = &v + return s +} + +// SetBody sets the Body field's value. +func (s *WriteGetObjectResponseInput) SetBody(v io.ReadSeeker) *WriteGetObjectResponseInput { + s.Body = v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *WriteGetObjectResponseInput) SetBucketKeyEnabled(v bool) *WriteGetObjectResponseInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. 
+func (s *WriteGetObjectResponseInput) SetCacheControl(v string) *WriteGetObjectResponseInput { + s.CacheControl = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *WriteGetObjectResponseInput) SetChecksumCRC32(v string) *WriteGetObjectResponseInput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *WriteGetObjectResponseInput) SetChecksumCRC32C(v string) *WriteGetObjectResponseInput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *WriteGetObjectResponseInput) SetChecksumSHA1(v string) *WriteGetObjectResponseInput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *WriteGetObjectResponseInput) SetChecksumSHA256(v string) *WriteGetObjectResponseInput { + s.ChecksumSHA256 = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *WriteGetObjectResponseInput) SetContentDisposition(v string) *WriteGetObjectResponseInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *WriteGetObjectResponseInput) SetContentEncoding(v string) *WriteGetObjectResponseInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *WriteGetObjectResponseInput) SetContentLanguage(v string) *WriteGetObjectResponseInput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *WriteGetObjectResponseInput) SetContentLength(v int64) *WriteGetObjectResponseInput { + s.ContentLength = &v + return s +} + +// SetContentRange sets the ContentRange field's value. +func (s *WriteGetObjectResponseInput) SetContentRange(v string) *WriteGetObjectResponseInput { + s.ContentRange = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *WriteGetObjectResponseInput) SetContentType(v string) *WriteGetObjectResponseInput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *WriteGetObjectResponseInput) SetDeleteMarker(v bool) *WriteGetObjectResponseInput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *WriteGetObjectResponseInput) SetETag(v string) *WriteGetObjectResponseInput { + s.ETag = &v + return s +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *WriteGetObjectResponseInput) SetErrorCode(v string) *WriteGetObjectResponseInput { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *WriteGetObjectResponseInput) SetErrorMessage(v string) *WriteGetObjectResponseInput { + s.ErrorMessage = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *WriteGetObjectResponseInput) SetExpiration(v string) *WriteGetObjectResponseInput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *WriteGetObjectResponseInput) SetExpires(v time.Time) *WriteGetObjectResponseInput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *WriteGetObjectResponseInput) SetLastModified(v time.Time) *WriteGetObjectResponseInput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. 
+func (s *WriteGetObjectResponseInput) SetMetadata(v map[string]*string) *WriteGetObjectResponseInput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *WriteGetObjectResponseInput) SetMissingMeta(v int64) *WriteGetObjectResponseInput { + s.MissingMeta = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *WriteGetObjectResponseInput) SetObjectLockLegalHoldStatus(v string) *WriteGetObjectResponseInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *WriteGetObjectResponseInput) SetObjectLockMode(v string) *WriteGetObjectResponseInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *WriteGetObjectResponseInput) SetObjectLockRetainUntilDate(v time.Time) *WriteGetObjectResponseInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *WriteGetObjectResponseInput) SetPartsCount(v int64) *WriteGetObjectResponseInput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *WriteGetObjectResponseInput) SetReplicationStatus(v string) *WriteGetObjectResponseInput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *WriteGetObjectResponseInput) SetRequestCharged(v string) *WriteGetObjectResponseInput { + s.RequestCharged = &v + return s +} + +// SetRequestRoute sets the RequestRoute field's value. +func (s *WriteGetObjectResponseInput) SetRequestRoute(v string) *WriteGetObjectResponseInput { + s.RequestRoute = &v + return s +} + +// SetRequestToken sets the RequestToken field's value. +func (s *WriteGetObjectResponseInput) SetRequestToken(v string) *WriteGetObjectResponseInput { + s.RequestToken = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *WriteGetObjectResponseInput) SetRestore(v string) *WriteGetObjectResponseInput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *WriteGetObjectResponseInput) SetSSECustomerAlgorithm(v string) *WriteGetObjectResponseInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *WriteGetObjectResponseInput) SetSSECustomerKeyMD5(v string) *WriteGetObjectResponseInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *WriteGetObjectResponseInput) SetSSEKMSKeyId(v string) *WriteGetObjectResponseInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *WriteGetObjectResponseInput) SetServerSideEncryption(v string) *WriteGetObjectResponseInput { + s.ServerSideEncryption = &v + return s +} + +// SetStatusCode sets the StatusCode field's value. +func (s *WriteGetObjectResponseInput) SetStatusCode(v int64) *WriteGetObjectResponseInput { + s.StatusCode = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *WriteGetObjectResponseInput) SetStorageClass(v string) *WriteGetObjectResponseInput { + s.StorageClass = &v + return s +} + +// SetTagCount sets the TagCount field's value. 
+func (s *WriteGetObjectResponseInput) SetTagCount(v int64) *WriteGetObjectResponseInput { + s.TagCount = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *WriteGetObjectResponseInput) SetVersionId(v string) *WriteGetObjectResponseInput { + s.VersionId = &v + return s +} + +func (s *WriteGetObjectResponseInput) hostLabels() map[string]string { + return map[string]string{ + "RequestRoute": aws.StringValue(s.RequestRoute), + } +} + +type WriteGetObjectResponseOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s WriteGetObjectResponseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s WriteGetObjectResponseOutput) GoString() string { + return s.String() +} + +const ( + // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value + AnalyticsS3ExportFileFormatCsv = "CSV" +) + +// AnalyticsS3ExportFileFormat_Values returns all elements of the AnalyticsS3ExportFileFormat enum +func AnalyticsS3ExportFileFormat_Values() []string { + return []string{ + AnalyticsS3ExportFileFormatCsv, + } +} + +const ( + // ArchiveStatusArchiveAccess is a ArchiveStatus enum value + ArchiveStatusArchiveAccess = "ARCHIVE_ACCESS" + + // ArchiveStatusDeepArchiveAccess is a ArchiveStatus enum value + ArchiveStatusDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" +) + +// ArchiveStatus_Values returns all elements of the ArchiveStatus enum +func ArchiveStatus_Values() []string { + return []string{ + ArchiveStatusArchiveAccess, + ArchiveStatusDeepArchiveAccess, + } +} + +const ( + // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value + BucketAccelerateStatusEnabled = "Enabled" + + // BucketAccelerateStatusSuspended is a BucketAccelerateStatus enum value + BucketAccelerateStatusSuspended = "Suspended" +) + +// BucketAccelerateStatus_Values returns all elements of the BucketAccelerateStatus enum +func BucketAccelerateStatus_Values() []string { + return []string{ + BucketAccelerateStatusEnabled, + BucketAccelerateStatusSuspended, + } +} + +const ( + // BucketCannedACLPrivate is a BucketCannedACL enum value + BucketCannedACLPrivate = "private" + + // BucketCannedACLPublicRead is a BucketCannedACL enum value + BucketCannedACLPublicRead = "public-read" + + // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value + BucketCannedACLPublicReadWrite = "public-read-write" + + // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value + BucketCannedACLAuthenticatedRead = "authenticated-read" +) + +// BucketCannedACL_Values returns all elements of the BucketCannedACL enum +func BucketCannedACL_Values() []string { + return []string{ + BucketCannedACLPrivate, + BucketCannedACLPublicRead, + BucketCannedACLPublicReadWrite, + BucketCannedACLAuthenticatedRead, + } +} + +const ( + // BucketLocationConstraintAfSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintAfSouth1 = "af-south-1" + + // BucketLocationConstraintApEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApEast1 = "ap-east-1" + + // 
BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast1 = "ap-northeast-1" + + // BucketLocationConstraintApNortheast2 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast2 = "ap-northeast-2" + + // BucketLocationConstraintApNortheast3 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast3 = "ap-northeast-3" + + // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSouth1 = "ap-south-1" + + // BucketLocationConstraintApSouth2 is a BucketLocationConstraint enum value + BucketLocationConstraintApSouth2 = "ap-south-2" + + // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast1 = "ap-southeast-1" + + // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast2 = "ap-southeast-2" + + // BucketLocationConstraintApSoutheast3 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast3 = "ap-southeast-3" + + // BucketLocationConstraintCaCentral1 is a BucketLocationConstraint enum value + BucketLocationConstraintCaCentral1 = "ca-central-1" + + // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value + BucketLocationConstraintCnNorth1 = "cn-north-1" + + // BucketLocationConstraintCnNorthwest1 is a BucketLocationConstraint enum value + BucketLocationConstraintCnNorthwest1 = "cn-northwest-1" + + // BucketLocationConstraintEu is a BucketLocationConstraint enum value + BucketLocationConstraintEu = "EU" + + // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuCentral1 = "eu-central-1" + + // BucketLocationConstraintEuNorth1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuNorth1 = "eu-north-1" + + // BucketLocationConstraintEuSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuSouth1 = "eu-south-1" + + // BucketLocationConstraintEuSouth2 is a BucketLocationConstraint enum value + BucketLocationConstraintEuSouth2 = "eu-south-2" + + // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest1 = "eu-west-1" + + // BucketLocationConstraintEuWest2 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest2 = "eu-west-2" + + // BucketLocationConstraintEuWest3 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest3 = "eu-west-3" + + // BucketLocationConstraintMeSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintMeSouth1 = "me-south-1" + + // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintSaEast1 = "sa-east-1" + + // BucketLocationConstraintUsEast2 is a BucketLocationConstraint enum value + BucketLocationConstraintUsEast2 = "us-east-2" + + // BucketLocationConstraintUsGovEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsGovEast1 = "us-gov-east-1" + + // BucketLocationConstraintUsGovWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsGovWest1 = "us-gov-west-1" + + // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest1 = "us-west-1" + + // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest2 = "us-west-2" +) + +// BucketLocationConstraint_Values returns all elements of the 
BucketLocationConstraint enum +func BucketLocationConstraint_Values() []string { + return []string{ + BucketLocationConstraintAfSouth1, + BucketLocationConstraintApEast1, + BucketLocationConstraintApNortheast1, + BucketLocationConstraintApNortheast2, + BucketLocationConstraintApNortheast3, + BucketLocationConstraintApSouth1, + BucketLocationConstraintApSouth2, + BucketLocationConstraintApSoutheast1, + BucketLocationConstraintApSoutheast2, + BucketLocationConstraintApSoutheast3, + BucketLocationConstraintCaCentral1, + BucketLocationConstraintCnNorth1, + BucketLocationConstraintCnNorthwest1, + BucketLocationConstraintEu, + BucketLocationConstraintEuCentral1, + BucketLocationConstraintEuNorth1, + BucketLocationConstraintEuSouth1, + BucketLocationConstraintEuSouth2, + BucketLocationConstraintEuWest1, + BucketLocationConstraintEuWest2, + BucketLocationConstraintEuWest3, + BucketLocationConstraintMeSouth1, + BucketLocationConstraintSaEast1, + BucketLocationConstraintUsEast2, + BucketLocationConstraintUsGovEast1, + BucketLocationConstraintUsGovWest1, + BucketLocationConstraintUsWest1, + BucketLocationConstraintUsWest2, + } +} + +const ( + // BucketLogsPermissionFullControl is a BucketLogsPermission enum value + BucketLogsPermissionFullControl = "FULL_CONTROL" + + // BucketLogsPermissionRead is a BucketLogsPermission enum value + BucketLogsPermissionRead = "READ" + + // BucketLogsPermissionWrite is a BucketLogsPermission enum value + BucketLogsPermissionWrite = "WRITE" +) + +// BucketLogsPermission_Values returns all elements of the BucketLogsPermission enum +func BucketLogsPermission_Values() []string { + return []string{ + BucketLogsPermissionFullControl, + BucketLogsPermissionRead, + BucketLogsPermissionWrite, + } +} + +const ( + // BucketTypeDirectory is a BucketType enum value + BucketTypeDirectory = "Directory" +) + +// BucketType_Values returns all elements of the BucketType enum +func BucketType_Values() []string { + return []string{ + BucketTypeDirectory, + } +} + +const ( + // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value + BucketVersioningStatusEnabled = "Enabled" + + // BucketVersioningStatusSuspended is a BucketVersioningStatus enum value + BucketVersioningStatusSuspended = "Suspended" +) + +// BucketVersioningStatus_Values returns all elements of the BucketVersioningStatus enum +func BucketVersioningStatus_Values() []string { + return []string{ + BucketVersioningStatusEnabled, + BucketVersioningStatusSuspended, + } +} + +const ( + // ChecksumAlgorithmCrc32 is a ChecksumAlgorithm enum value + ChecksumAlgorithmCrc32 = "CRC32" + + // ChecksumAlgorithmCrc32c is a ChecksumAlgorithm enum value + ChecksumAlgorithmCrc32c = "CRC32C" + + // ChecksumAlgorithmSha1 is a ChecksumAlgorithm enum value + ChecksumAlgorithmSha1 = "SHA1" + + // ChecksumAlgorithmSha256 is a ChecksumAlgorithm enum value + ChecksumAlgorithmSha256 = "SHA256" +) + +// ChecksumAlgorithm_Values returns all elements of the ChecksumAlgorithm enum +func ChecksumAlgorithm_Values() []string { + return []string{ + ChecksumAlgorithmCrc32, + ChecksumAlgorithmCrc32c, + ChecksumAlgorithmSha1, + ChecksumAlgorithmSha256, + } +} + +const ( + // ChecksumModeEnabled is a ChecksumMode enum value + ChecksumModeEnabled = "ENABLED" +) + +// ChecksumMode_Values returns all elements of the ChecksumMode enum +func ChecksumMode_Values() []string { + return []string{ + ChecksumModeEnabled, + } +} + +const ( + // CompressionTypeNone is a CompressionType enum value + CompressionTypeNone = "NONE" + + // CompressionTypeGzip is a 
CompressionType enum value + CompressionTypeGzip = "GZIP" + + // CompressionTypeBzip2 is a CompressionType enum value + CompressionTypeBzip2 = "BZIP2" +) + +// CompressionType_Values returns all elements of the CompressionType enum +func CompressionType_Values() []string { + return []string{ + CompressionTypeNone, + CompressionTypeGzip, + CompressionTypeBzip2, + } +} + +const ( + // DataRedundancySingleAvailabilityZone is a DataRedundancy enum value + DataRedundancySingleAvailabilityZone = "SingleAvailabilityZone" +) + +// DataRedundancy_Values returns all elements of the DataRedundancy enum +func DataRedundancy_Values() []string { + return []string{ + DataRedundancySingleAvailabilityZone, + } +} + +const ( + // DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusEnabled = "Enabled" + + // DeleteMarkerReplicationStatusDisabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusDisabled = "Disabled" +) + +// DeleteMarkerReplicationStatus_Values returns all elements of the DeleteMarkerReplicationStatus enum +func DeleteMarkerReplicationStatus_Values() []string { + return []string{ + DeleteMarkerReplicationStatusEnabled, + DeleteMarkerReplicationStatusDisabled, + } +} + +// Requests Amazon S3 to encode the object keys in the response and specifies +// the encoding method to use. An object key can contain any Unicode character; +// however, the XML 1.0 parser cannot parse some characters, such as characters +// with an ASCII value from 0 to 10. For characters that are not supported in +// XML 1.0, you can add this parameter to request that Amazon S3 encode the +// keys in the response. +const ( + // EncodingTypeUrl is a EncodingType enum value + EncodingTypeUrl = "url" +) + +// EncodingType_Values returns all elements of the EncodingType enum +func EncodingType_Values() []string { + return []string{ + EncodingTypeUrl, + } +} + +// The bucket event for which to send notifications. 
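+//
+// As a usage sketch only (not upstream SDK documentation), these values are
+// typically supplied to PutBucketNotificationConfiguration. The bucket name
+// and queue ARN below are placeholder assumptions, and svc is assumed to be
+// an *s3.S3 client:
+//
+//	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//		NotificationConfiguration: &s3.NotificationConfiguration{
+//			QueueConfigurations: []*s3.QueueConfiguration{{
+//				Events:   []*string{aws.String(s3.EventS3ObjectCreated)},
+//				QueueArn: aws.String("arn:aws:sqs:us-east-1:111122223333:example-queue"),
+//			}},
+//		},
+//	})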
+const ( + // EventS3ReducedRedundancyLostObject is a Event enum value + EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" + + // EventS3ObjectCreated is a Event enum value + EventS3ObjectCreated = "s3:ObjectCreated:*" + + // EventS3ObjectCreatedPut is a Event enum value + EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" + + // EventS3ObjectCreatedPost is a Event enum value + EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" + + // EventS3ObjectCreatedCopy is a Event enum value + EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" + + // EventS3ObjectCreatedCompleteMultipartUpload is a Event enum value + EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" + + // EventS3ObjectRemoved is a Event enum value + EventS3ObjectRemoved = "s3:ObjectRemoved:*" + + // EventS3ObjectRemovedDelete is a Event enum value + EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" + + // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value + EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" + + // EventS3ObjectRestore is a Event enum value + EventS3ObjectRestore = "s3:ObjectRestore:*" + + // EventS3ObjectRestorePost is a Event enum value + EventS3ObjectRestorePost = "s3:ObjectRestore:Post" + + // EventS3ObjectRestoreCompleted is a Event enum value + EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed" + + // EventS3Replication is a Event enum value + EventS3Replication = "s3:Replication:*" + + // EventS3ReplicationOperationFailedReplication is a Event enum value + EventS3ReplicationOperationFailedReplication = "s3:Replication:OperationFailedReplication" + + // EventS3ReplicationOperationNotTracked is a Event enum value + EventS3ReplicationOperationNotTracked = "s3:Replication:OperationNotTracked" + + // EventS3ReplicationOperationMissedThreshold is a Event enum value + EventS3ReplicationOperationMissedThreshold = "s3:Replication:OperationMissedThreshold" + + // EventS3ReplicationOperationReplicatedAfterThreshold is a Event enum value + EventS3ReplicationOperationReplicatedAfterThreshold = "s3:Replication:OperationReplicatedAfterThreshold" + + // EventS3ObjectRestoreDelete is a Event enum value + EventS3ObjectRestoreDelete = "s3:ObjectRestore:Delete" + + // EventS3LifecycleTransition is a Event enum value + EventS3LifecycleTransition = "s3:LifecycleTransition" + + // EventS3IntelligentTiering is a Event enum value + EventS3IntelligentTiering = "s3:IntelligentTiering" + + // EventS3ObjectAclPut is a Event enum value + EventS3ObjectAclPut = "s3:ObjectAcl:Put" + + // EventS3LifecycleExpiration is a Event enum value + EventS3LifecycleExpiration = "s3:LifecycleExpiration:*" + + // EventS3LifecycleExpirationDelete is a Event enum value + EventS3LifecycleExpirationDelete = "s3:LifecycleExpiration:Delete" + + // EventS3LifecycleExpirationDeleteMarkerCreated is a Event enum value + EventS3LifecycleExpirationDeleteMarkerCreated = "s3:LifecycleExpiration:DeleteMarkerCreated" + + // EventS3ObjectTagging is a Event enum value + EventS3ObjectTagging = "s3:ObjectTagging:*" + + // EventS3ObjectTaggingPut is a Event enum value + EventS3ObjectTaggingPut = "s3:ObjectTagging:Put" + + // EventS3ObjectTaggingDelete is a Event enum value + EventS3ObjectTaggingDelete = "s3:ObjectTagging:Delete" +) + +// Event_Values returns all elements of the Event enum +func Event_Values() []string { + return []string{ + EventS3ReducedRedundancyLostObject, + EventS3ObjectCreated, + EventS3ObjectCreatedPut, + EventS3ObjectCreatedPost, + 
EventS3ObjectCreatedCopy, + EventS3ObjectCreatedCompleteMultipartUpload, + EventS3ObjectRemoved, + EventS3ObjectRemovedDelete, + EventS3ObjectRemovedDeleteMarkerCreated, + EventS3ObjectRestore, + EventS3ObjectRestorePost, + EventS3ObjectRestoreCompleted, + EventS3Replication, + EventS3ReplicationOperationFailedReplication, + EventS3ReplicationOperationNotTracked, + EventS3ReplicationOperationMissedThreshold, + EventS3ReplicationOperationReplicatedAfterThreshold, + EventS3ObjectRestoreDelete, + EventS3LifecycleTransition, + EventS3IntelligentTiering, + EventS3ObjectAclPut, + EventS3LifecycleExpiration, + EventS3LifecycleExpirationDelete, + EventS3LifecycleExpirationDeleteMarkerCreated, + EventS3ObjectTagging, + EventS3ObjectTaggingPut, + EventS3ObjectTaggingDelete, + } +} + +const ( + // ExistingObjectReplicationStatusEnabled is a ExistingObjectReplicationStatus enum value + ExistingObjectReplicationStatusEnabled = "Enabled" + + // ExistingObjectReplicationStatusDisabled is a ExistingObjectReplicationStatus enum value + ExistingObjectReplicationStatusDisabled = "Disabled" +) + +// ExistingObjectReplicationStatus_Values returns all elements of the ExistingObjectReplicationStatus enum +func ExistingObjectReplicationStatus_Values() []string { + return []string{ + ExistingObjectReplicationStatusEnabled, + ExistingObjectReplicationStatusDisabled, + } +} + +const ( + // ExpirationStatusEnabled is a ExpirationStatus enum value + ExpirationStatusEnabled = "Enabled" + + // ExpirationStatusDisabled is a ExpirationStatus enum value + ExpirationStatusDisabled = "Disabled" +) + +// ExpirationStatus_Values returns all elements of the ExpirationStatus enum +func ExpirationStatus_Values() []string { + return []string{ + ExpirationStatusEnabled, + ExpirationStatusDisabled, + } +} + +const ( + // ExpressionTypeSql is a ExpressionType enum value + ExpressionTypeSql = "SQL" +) + +// ExpressionType_Values returns all elements of the ExpressionType enum +func ExpressionType_Values() []string { + return []string{ + ExpressionTypeSql, + } +} + +const ( + // FileHeaderInfoUse is a FileHeaderInfo enum value + FileHeaderInfoUse = "USE" + + // FileHeaderInfoIgnore is a FileHeaderInfo enum value + FileHeaderInfoIgnore = "IGNORE" + + // FileHeaderInfoNone is a FileHeaderInfo enum value + FileHeaderInfoNone = "NONE" +) + +// FileHeaderInfo_Values returns all elements of the FileHeaderInfo enum +func FileHeaderInfo_Values() []string { + return []string{ + FileHeaderInfoUse, + FileHeaderInfoIgnore, + FileHeaderInfoNone, + } +} + +const ( + // FilterRuleNamePrefix is a FilterRuleName enum value + FilterRuleNamePrefix = "prefix" + + // FilterRuleNameSuffix is a FilterRuleName enum value + FilterRuleNameSuffix = "suffix" +) + +// FilterRuleName_Values returns all elements of the FilterRuleName enum +func FilterRuleName_Values() []string { + return []string{ + FilterRuleNamePrefix, + FilterRuleNameSuffix, + } +} + +const ( + // IntelligentTieringAccessTierArchiveAccess is a IntelligentTieringAccessTier enum value + IntelligentTieringAccessTierArchiveAccess = "ARCHIVE_ACCESS" + + // IntelligentTieringAccessTierDeepArchiveAccess is a IntelligentTieringAccessTier enum value + IntelligentTieringAccessTierDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" +) + +// IntelligentTieringAccessTier_Values returns all elements of the IntelligentTieringAccessTier enum +func IntelligentTieringAccessTier_Values() []string { + return []string{ + IntelligentTieringAccessTierArchiveAccess, + IntelligentTieringAccessTierDeepArchiveAccess, + } +} + +const 
( + // IntelligentTieringStatusEnabled is a IntelligentTieringStatus enum value + IntelligentTieringStatusEnabled = "Enabled" + + // IntelligentTieringStatusDisabled is a IntelligentTieringStatus enum value + IntelligentTieringStatusDisabled = "Disabled" +) + +// IntelligentTieringStatus_Values returns all elements of the IntelligentTieringStatus enum +func IntelligentTieringStatus_Values() []string { + return []string{ + IntelligentTieringStatusEnabled, + IntelligentTieringStatusDisabled, + } +} + +const ( + // InventoryFormatCsv is a InventoryFormat enum value + InventoryFormatCsv = "CSV" + + // InventoryFormatOrc is a InventoryFormat enum value + InventoryFormatOrc = "ORC" + + // InventoryFormatParquet is a InventoryFormat enum value + InventoryFormatParquet = "Parquet" +) + +// InventoryFormat_Values returns all elements of the InventoryFormat enum +func InventoryFormat_Values() []string { + return []string{ + InventoryFormatCsv, + InventoryFormatOrc, + InventoryFormatParquet, + } +} + +const ( + // InventoryFrequencyDaily is a InventoryFrequency enum value + InventoryFrequencyDaily = "Daily" + + // InventoryFrequencyWeekly is a InventoryFrequency enum value + InventoryFrequencyWeekly = "Weekly" +) + +// InventoryFrequency_Values returns all elements of the InventoryFrequency enum +func InventoryFrequency_Values() []string { + return []string{ + InventoryFrequencyDaily, + InventoryFrequencyWeekly, + } +} + +const ( + // InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsAll = "All" + + // InventoryIncludedObjectVersionsCurrent is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsCurrent = "Current" +) + +// InventoryIncludedObjectVersions_Values returns all elements of the InventoryIncludedObjectVersions enum +func InventoryIncludedObjectVersions_Values() []string { + return []string{ + InventoryIncludedObjectVersionsAll, + InventoryIncludedObjectVersionsCurrent, + } +} + +const ( + // InventoryOptionalFieldSize is a InventoryOptionalField enum value + InventoryOptionalFieldSize = "Size" + + // InventoryOptionalFieldLastModifiedDate is a InventoryOptionalField enum value + InventoryOptionalFieldLastModifiedDate = "LastModifiedDate" + + // InventoryOptionalFieldStorageClass is a InventoryOptionalField enum value + InventoryOptionalFieldStorageClass = "StorageClass" + + // InventoryOptionalFieldEtag is a InventoryOptionalField enum value + InventoryOptionalFieldEtag = "ETag" + + // InventoryOptionalFieldIsMultipartUploaded is a InventoryOptionalField enum value + InventoryOptionalFieldIsMultipartUploaded = "IsMultipartUploaded" + + // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value + InventoryOptionalFieldReplicationStatus = "ReplicationStatus" + + // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value + InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" + + // InventoryOptionalFieldObjectLockRetainUntilDate is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockRetainUntilDate = "ObjectLockRetainUntilDate" + + // InventoryOptionalFieldObjectLockMode is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockMode = "ObjectLockMode" + + // InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus" + + // InventoryOptionalFieldIntelligentTieringAccessTier is a InventoryOptionalField enum value + 
InventoryOptionalFieldIntelligentTieringAccessTier = "IntelligentTieringAccessTier" + + // InventoryOptionalFieldBucketKeyStatus is a InventoryOptionalField enum value + InventoryOptionalFieldBucketKeyStatus = "BucketKeyStatus" + + // InventoryOptionalFieldChecksumAlgorithm is a InventoryOptionalField enum value + InventoryOptionalFieldChecksumAlgorithm = "ChecksumAlgorithm" + + // InventoryOptionalFieldObjectAccessControlList is a InventoryOptionalField enum value + InventoryOptionalFieldObjectAccessControlList = "ObjectAccessControlList" + + // InventoryOptionalFieldObjectOwner is a InventoryOptionalField enum value + InventoryOptionalFieldObjectOwner = "ObjectOwner" +) + +// InventoryOptionalField_Values returns all elements of the InventoryOptionalField enum +func InventoryOptionalField_Values() []string { + return []string{ + InventoryOptionalFieldSize, + InventoryOptionalFieldLastModifiedDate, + InventoryOptionalFieldStorageClass, + InventoryOptionalFieldEtag, + InventoryOptionalFieldIsMultipartUploaded, + InventoryOptionalFieldReplicationStatus, + InventoryOptionalFieldEncryptionStatus, + InventoryOptionalFieldObjectLockRetainUntilDate, + InventoryOptionalFieldObjectLockMode, + InventoryOptionalFieldObjectLockLegalHoldStatus, + InventoryOptionalFieldIntelligentTieringAccessTier, + InventoryOptionalFieldBucketKeyStatus, + InventoryOptionalFieldChecksumAlgorithm, + InventoryOptionalFieldObjectAccessControlList, + InventoryOptionalFieldObjectOwner, + } +} + +const ( + // JSONTypeDocument is a JSONType enum value + JSONTypeDocument = "DOCUMENT" + + // JSONTypeLines is a JSONType enum value + JSONTypeLines = "LINES" +) + +// JSONType_Values returns all elements of the JSONType enum +func JSONType_Values() []string { + return []string{ + JSONTypeDocument, + JSONTypeLines, + } +} + +const ( + // LocationTypeAvailabilityZone is a LocationType enum value + LocationTypeAvailabilityZone = "AvailabilityZone" +) + +// LocationType_Values returns all elements of the LocationType enum +func LocationType_Values() []string { + return []string{ + LocationTypeAvailabilityZone, + } +} + +const ( + // MFADeleteEnabled is a MFADelete enum value + MFADeleteEnabled = "Enabled" + + // MFADeleteDisabled is a MFADelete enum value + MFADeleteDisabled = "Disabled" +) + +// MFADelete_Values returns all elements of the MFADelete enum +func MFADelete_Values() []string { + return []string{ + MFADeleteEnabled, + MFADeleteDisabled, + } +} + +const ( + // MFADeleteStatusEnabled is a MFADeleteStatus enum value + MFADeleteStatusEnabled = "Enabled" + + // MFADeleteStatusDisabled is a MFADeleteStatus enum value + MFADeleteStatusDisabled = "Disabled" +) + +// MFADeleteStatus_Values returns all elements of the MFADeleteStatus enum +func MFADeleteStatus_Values() []string { + return []string{ + MFADeleteStatusEnabled, + MFADeleteStatusDisabled, + } +} + +const ( + // MetadataDirectiveCopy is a MetadataDirective enum value + MetadataDirectiveCopy = "COPY" + + // MetadataDirectiveReplace is a MetadataDirective enum value + MetadataDirectiveReplace = "REPLACE" +) + +// MetadataDirective_Values returns all elements of the MetadataDirective enum +func MetadataDirective_Values() []string { + return []string{ + MetadataDirectiveCopy, + MetadataDirectiveReplace, + } +} + +const ( + // MetricsStatusEnabled is a MetricsStatus enum value + MetricsStatusEnabled = "Enabled" + + // MetricsStatusDisabled is a MetricsStatus enum value + MetricsStatusDisabled = "Disabled" +) + +// MetricsStatus_Values returns all elements of the MetricsStatus 
enum +func MetricsStatus_Values() []string { + return []string{ + MetricsStatusEnabled, + MetricsStatusDisabled, + } +} + +const ( + // ObjectAttributesEtag is a ObjectAttributes enum value + ObjectAttributesEtag = "ETag" + + // ObjectAttributesChecksum is a ObjectAttributes enum value + ObjectAttributesChecksum = "Checksum" + + // ObjectAttributesObjectParts is a ObjectAttributes enum value + ObjectAttributesObjectParts = "ObjectParts" + + // ObjectAttributesStorageClass is a ObjectAttributes enum value + ObjectAttributesStorageClass = "StorageClass" + + // ObjectAttributesObjectSize is a ObjectAttributes enum value + ObjectAttributesObjectSize = "ObjectSize" +) + +// ObjectAttributes_Values returns all elements of the ObjectAttributes enum +func ObjectAttributes_Values() []string { + return []string{ + ObjectAttributesEtag, + ObjectAttributesChecksum, + ObjectAttributesObjectParts, + ObjectAttributesStorageClass, + ObjectAttributesObjectSize, + } +} + +const ( + // ObjectCannedACLPrivate is a ObjectCannedACL enum value + ObjectCannedACLPrivate = "private" + + // ObjectCannedACLPublicRead is a ObjectCannedACL enum value + ObjectCannedACLPublicRead = "public-read" + + // ObjectCannedACLPublicReadWrite is a ObjectCannedACL enum value + ObjectCannedACLPublicReadWrite = "public-read-write" + + // ObjectCannedACLAuthenticatedRead is a ObjectCannedACL enum value + ObjectCannedACLAuthenticatedRead = "authenticated-read" + + // ObjectCannedACLAwsExecRead is a ObjectCannedACL enum value + ObjectCannedACLAwsExecRead = "aws-exec-read" + + // ObjectCannedACLBucketOwnerRead is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerRead = "bucket-owner-read" + + // ObjectCannedACLBucketOwnerFullControl is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" +) + +// ObjectCannedACL_Values returns all elements of the ObjectCannedACL enum +func ObjectCannedACL_Values() []string { + return []string{ + ObjectCannedACLPrivate, + ObjectCannedACLPublicRead, + ObjectCannedACLPublicReadWrite, + ObjectCannedACLAuthenticatedRead, + ObjectCannedACLAwsExecRead, + ObjectCannedACLBucketOwnerRead, + ObjectCannedACLBucketOwnerFullControl, + } +} + +const ( + // ObjectLockEnabledEnabled is a ObjectLockEnabled enum value + ObjectLockEnabledEnabled = "Enabled" +) + +// ObjectLockEnabled_Values returns all elements of the ObjectLockEnabled enum +func ObjectLockEnabled_Values() []string { + return []string{ + ObjectLockEnabledEnabled, + } +} + +const ( + // ObjectLockLegalHoldStatusOn is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOn = "ON" + + // ObjectLockLegalHoldStatusOff is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOff = "OFF" +) + +// ObjectLockLegalHoldStatus_Values returns all elements of the ObjectLockLegalHoldStatus enum +func ObjectLockLegalHoldStatus_Values() []string { + return []string{ + ObjectLockLegalHoldStatusOn, + ObjectLockLegalHoldStatusOff, + } +} + +const ( + // ObjectLockModeGovernance is a ObjectLockMode enum value + ObjectLockModeGovernance = "GOVERNANCE" + + // ObjectLockModeCompliance is a ObjectLockMode enum value + ObjectLockModeCompliance = "COMPLIANCE" +) + +// ObjectLockMode_Values returns all elements of the ObjectLockMode enum +func ObjectLockMode_Values() []string { + return []string{ + ObjectLockModeGovernance, + ObjectLockModeCompliance, + } +} + +const ( + // ObjectLockRetentionModeGovernance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeGovernance = "GOVERNANCE" + + // 
ObjectLockRetentionModeCompliance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeCompliance = "COMPLIANCE" +) + +// ObjectLockRetentionMode_Values returns all elements of the ObjectLockRetentionMode enum +func ObjectLockRetentionMode_Values() []string { + return []string{ + ObjectLockRetentionModeGovernance, + ObjectLockRetentionModeCompliance, + } +} + +// The container element for object ownership for a bucket's ownership controls. +// +// BucketOwnerPreferred - Objects uploaded to the bucket change ownership to +// the bucket owner if the objects are uploaded with the bucket-owner-full-control +// canned ACL. +// +// ObjectWriter - The uploading account will own the object if the object is +// uploaded with the bucket-owner-full-control canned ACL. +// +// BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer +// affect permissions. The bucket owner automatically owns and has full control +// over every object in the bucket. The bucket only accepts PUT requests that +// don't specify an ACL or specify bucket owner full control ACLs (such as the +// predefined bucket-owner-full-control canned ACL or a custom ACL in XML format +// that grants the same permissions). +// +// By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled. +// We recommend keeping ACLs disabled, except in uncommon use cases where you +// must control access for each object individually. For more information about +// S3 Object Ownership, see Controlling ownership of objects and disabling ACLs +// for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. Directory buckets +// use the bucket owner enforced setting for S3 Object Ownership. 
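+//
+// As a usage sketch only (not upstream SDK documentation), one of these values
+// is typically applied with PutBucketOwnershipControls. The bucket name below
+// is a placeholder assumption, and svc is assumed to be an *s3.S3 client:
+//
+//	_, err := svc.PutBucketOwnershipControls(&s3.PutBucketOwnershipControlsInput{
+//		Bucket: aws.String("example-bucket"),
+//		OwnershipControls: &s3.OwnershipControls{
+//			Rules: []*s3.OwnershipControlsRule{{
+//				ObjectOwnership: aws.String(s3.ObjectOwnershipBucketOwnerEnforced),
+//			}},
+//		},
+//	})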
+const ( + // ObjectOwnershipBucketOwnerPreferred is a ObjectOwnership enum value + ObjectOwnershipBucketOwnerPreferred = "BucketOwnerPreferred" + + // ObjectOwnershipObjectWriter is a ObjectOwnership enum value + ObjectOwnershipObjectWriter = "ObjectWriter" + + // ObjectOwnershipBucketOwnerEnforced is a ObjectOwnership enum value + ObjectOwnershipBucketOwnerEnforced = "BucketOwnerEnforced" +) + +// ObjectOwnership_Values returns all elements of the ObjectOwnership enum +func ObjectOwnership_Values() []string { + return []string{ + ObjectOwnershipBucketOwnerPreferred, + ObjectOwnershipObjectWriter, + ObjectOwnershipBucketOwnerEnforced, + } +} + +const ( + // ObjectStorageClassStandard is a ObjectStorageClass enum value + ObjectStorageClassStandard = "STANDARD" + + // ObjectStorageClassReducedRedundancy is a ObjectStorageClass enum value + ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // ObjectStorageClassGlacier is a ObjectStorageClass enum value + ObjectStorageClassGlacier = "GLACIER" + + // ObjectStorageClassStandardIa is a ObjectStorageClass enum value + ObjectStorageClassStandardIa = "STANDARD_IA" + + // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value + ObjectStorageClassOnezoneIa = "ONEZONE_IA" + + // ObjectStorageClassIntelligentTiering is a ObjectStorageClass enum value + ObjectStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // ObjectStorageClassDeepArchive is a ObjectStorageClass enum value + ObjectStorageClassDeepArchive = "DEEP_ARCHIVE" + + // ObjectStorageClassOutposts is a ObjectStorageClass enum value + ObjectStorageClassOutposts = "OUTPOSTS" + + // ObjectStorageClassGlacierIr is a ObjectStorageClass enum value + ObjectStorageClassGlacierIr = "GLACIER_IR" + + // ObjectStorageClassSnow is a ObjectStorageClass enum value + ObjectStorageClassSnow = "SNOW" + + // ObjectStorageClassExpressOnezone is a ObjectStorageClass enum value + ObjectStorageClassExpressOnezone = "EXPRESS_ONEZONE" +) + +// ObjectStorageClass_Values returns all elements of the ObjectStorageClass enum +func ObjectStorageClass_Values() []string { + return []string{ + ObjectStorageClassStandard, + ObjectStorageClassReducedRedundancy, + ObjectStorageClassGlacier, + ObjectStorageClassStandardIa, + ObjectStorageClassOnezoneIa, + ObjectStorageClassIntelligentTiering, + ObjectStorageClassDeepArchive, + ObjectStorageClassOutposts, + ObjectStorageClassGlacierIr, + ObjectStorageClassSnow, + ObjectStorageClassExpressOnezone, + } +} + +const ( + // ObjectVersionStorageClassStandard is a ObjectVersionStorageClass enum value + ObjectVersionStorageClassStandard = "STANDARD" +) + +// ObjectVersionStorageClass_Values returns all elements of the ObjectVersionStorageClass enum +func ObjectVersionStorageClass_Values() []string { + return []string{ + ObjectVersionStorageClassStandard, + } +} + +const ( + // OptionalObjectAttributesRestoreStatus is a OptionalObjectAttributes enum value + OptionalObjectAttributesRestoreStatus = "RestoreStatus" +) + +// OptionalObjectAttributes_Values returns all elements of the OptionalObjectAttributes enum +func OptionalObjectAttributes_Values() []string { + return []string{ + OptionalObjectAttributesRestoreStatus, + } +} + +const ( + // OwnerOverrideDestination is a OwnerOverride enum value + OwnerOverrideDestination = "Destination" +) + +// OwnerOverride_Values returns all elements of the OwnerOverride enum +func OwnerOverride_Values() []string { + return []string{ + OwnerOverrideDestination, + } +} + +const ( + // PartitionDateSourceEventTime is a 
PartitionDateSource enum value + PartitionDateSourceEventTime = "EventTime" + + // PartitionDateSourceDeliveryTime is a PartitionDateSource enum value + PartitionDateSourceDeliveryTime = "DeliveryTime" +) + +// PartitionDateSource_Values returns all elements of the PartitionDateSource enum +func PartitionDateSource_Values() []string { + return []string{ + PartitionDateSourceEventTime, + PartitionDateSourceDeliveryTime, + } +} + +const ( + // PayerRequester is a Payer enum value + PayerRequester = "Requester" + + // PayerBucketOwner is a Payer enum value + PayerBucketOwner = "BucketOwner" +) + +// Payer_Values returns all elements of the Payer enum +func Payer_Values() []string { + return []string{ + PayerRequester, + PayerBucketOwner, + } +} + +const ( + // PermissionFullControl is a Permission enum value + PermissionFullControl = "FULL_CONTROL" + + // PermissionWrite is a Permission enum value + PermissionWrite = "WRITE" + + // PermissionWriteAcp is a Permission enum value + PermissionWriteAcp = "WRITE_ACP" + + // PermissionRead is a Permission enum value + PermissionRead = "READ" + + // PermissionReadAcp is a Permission enum value + PermissionReadAcp = "READ_ACP" +) + +// Permission_Values returns all elements of the Permission enum +func Permission_Values() []string { + return []string{ + PermissionFullControl, + PermissionWrite, + PermissionWriteAcp, + PermissionRead, + PermissionReadAcp, + } +} + +const ( + // ProtocolHttp is a Protocol enum value + ProtocolHttp = "http" + + // ProtocolHttps is a Protocol enum value + ProtocolHttps = "https" +) + +// Protocol_Values returns all elements of the Protocol enum +func Protocol_Values() []string { + return []string{ + ProtocolHttp, + ProtocolHttps, + } +} + +const ( + // QuoteFieldsAlways is a QuoteFields enum value + QuoteFieldsAlways = "ALWAYS" + + // QuoteFieldsAsneeded is a QuoteFields enum value + QuoteFieldsAsneeded = "ASNEEDED" +) + +// QuoteFields_Values returns all elements of the QuoteFields enum +func QuoteFields_Values() []string { + return []string{ + QuoteFieldsAlways, + QuoteFieldsAsneeded, + } +} + +const ( + // ReplicaModificationsStatusEnabled is a ReplicaModificationsStatus enum value + ReplicaModificationsStatusEnabled = "Enabled" + + // ReplicaModificationsStatusDisabled is a ReplicaModificationsStatus enum value + ReplicaModificationsStatusDisabled = "Disabled" +) + +// ReplicaModificationsStatus_Values returns all elements of the ReplicaModificationsStatus enum +func ReplicaModificationsStatus_Values() []string { + return []string{ + ReplicaModificationsStatusEnabled, + ReplicaModificationsStatusDisabled, + } +} + +const ( + // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusEnabled = "Enabled" + + // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusDisabled = "Disabled" +) + +// ReplicationRuleStatus_Values returns all elements of the ReplicationRuleStatus enum +func ReplicationRuleStatus_Values() []string { + return []string{ + ReplicationRuleStatusEnabled, + ReplicationRuleStatusDisabled, + } +} + +const ( + // ReplicationStatusComplete is a ReplicationStatus enum value + ReplicationStatusComplete = "COMPLETE" + + // ReplicationStatusPending is a ReplicationStatus enum value + ReplicationStatusPending = "PENDING" + + // ReplicationStatusFailed is a ReplicationStatus enum value + ReplicationStatusFailed = "FAILED" + + // ReplicationStatusReplica is a ReplicationStatus enum value + ReplicationStatusReplica = "REPLICA" + + // 
ReplicationStatusCompleted is a ReplicationStatus enum value + ReplicationStatusCompleted = "COMPLETED" +) + +// ReplicationStatus_Values returns all elements of the ReplicationStatus enum +func ReplicationStatus_Values() []string { + return []string{ + ReplicationStatusComplete, + ReplicationStatusPending, + ReplicationStatusFailed, + ReplicationStatusReplica, + ReplicationStatusCompleted, + } +} + +const ( + // ReplicationTimeStatusEnabled is a ReplicationTimeStatus enum value + ReplicationTimeStatusEnabled = "Enabled" + + // ReplicationTimeStatusDisabled is a ReplicationTimeStatus enum value + ReplicationTimeStatusDisabled = "Disabled" +) + +// ReplicationTimeStatus_Values returns all elements of the ReplicationTimeStatus enum +func ReplicationTimeStatus_Values() []string { + return []string{ + ReplicationTimeStatusEnabled, + ReplicationTimeStatusDisabled, + } +} + +// If present, indicates that the requester was successfully charged for the +// request. +// +// This functionality is not supported for directory buckets. +const ( + // RequestChargedRequester is a RequestCharged enum value + RequestChargedRequester = "requester" +) + +// RequestCharged_Values returns all elements of the RequestCharged enum +func RequestCharged_Values() []string { + return []string{ + RequestChargedRequester, + } +} + +// Confirms that the requester knows that they will be charged for the request. +// Bucket owners need not specify this parameter in their requests. If either +// the source or destination S3 bucket has Requester Pays enabled, the requester +// will pay for corresponding charges to copy the object. For information about +// downloading objects from Requester Pays buckets, see Downloading Objects +// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) +// in the Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. 
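+//
+// As a usage sketch only (not upstream SDK documentation), a requester
+// acknowledges Requester Pays charges on a download like this. The bucket and
+// key below are placeholder assumptions, svc is assumed to be an *s3.S3
+// client, and out.Body must be closed by the caller after a successful call:
+//
+//	out, err := svc.GetObject(&s3.GetObjectInput{
+//		Bucket:       aws.String("example-requester-pays-bucket"),
+//		Key:          aws.String("example-object-key"),
+//		RequestPayer: aws.String(s3.RequestPayerRequester),
+//	})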
+const ( + // RequestPayerRequester is a RequestPayer enum value + RequestPayerRequester = "requester" +) + +// RequestPayer_Values returns all elements of the RequestPayer enum +func RequestPayer_Values() []string { + return []string{ + RequestPayerRequester, + } +} + +const ( + // RestoreRequestTypeSelect is a RestoreRequestType enum value + RestoreRequestTypeSelect = "SELECT" +) + +// RestoreRequestType_Values returns all elements of the RestoreRequestType enum +func RestoreRequestType_Values() []string { + return []string{ + RestoreRequestTypeSelect, + } +} + +const ( + // ServerSideEncryptionAes256 is a ServerSideEncryption enum value + ServerSideEncryptionAes256 = "AES256" + + // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value + ServerSideEncryptionAwsKms = "aws:kms" + + // ServerSideEncryptionAwsKmsDsse is a ServerSideEncryption enum value + ServerSideEncryptionAwsKmsDsse = "aws:kms:dsse" +) + +// ServerSideEncryption_Values returns all elements of the ServerSideEncryption enum +func ServerSideEncryption_Values() []string { + return []string{ + ServerSideEncryptionAes256, + ServerSideEncryptionAwsKms, + ServerSideEncryptionAwsKmsDsse, + } +} + +const ( + // SessionModeReadOnly is a SessionMode enum value + SessionModeReadOnly = "ReadOnly" + + // SessionModeReadWrite is a SessionMode enum value + SessionModeReadWrite = "ReadWrite" +) + +// SessionMode_Values returns all elements of the SessionMode enum +func SessionMode_Values() []string { + return []string{ + SessionModeReadOnly, + SessionModeReadWrite, + } +} + +const ( + // SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusEnabled = "Enabled" + + // SseKmsEncryptedObjectsStatusDisabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusDisabled = "Disabled" +) + +// SseKmsEncryptedObjectsStatus_Values returns all elements of the SseKmsEncryptedObjectsStatus enum +func SseKmsEncryptedObjectsStatus_Values() []string { + return []string{ + SseKmsEncryptedObjectsStatusEnabled, + SseKmsEncryptedObjectsStatusDisabled, + } +} + +const ( + // StorageClassStandard is a StorageClass enum value + StorageClassStandard = "STANDARD" + + // StorageClassReducedRedundancy is a StorageClass enum value + StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // StorageClassStandardIa is a StorageClass enum value + StorageClassStandardIa = "STANDARD_IA" + + // StorageClassOnezoneIa is a StorageClass enum value + StorageClassOnezoneIa = "ONEZONE_IA" + + // StorageClassIntelligentTiering is a StorageClass enum value + StorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // StorageClassGlacier is a StorageClass enum value + StorageClassGlacier = "GLACIER" + + // StorageClassDeepArchive is a StorageClass enum value + StorageClassDeepArchive = "DEEP_ARCHIVE" + + // StorageClassOutposts is a StorageClass enum value + StorageClassOutposts = "OUTPOSTS" + + // StorageClassGlacierIr is a StorageClass enum value + StorageClassGlacierIr = "GLACIER_IR" + + // StorageClassSnow is a StorageClass enum value + StorageClassSnow = "SNOW" + + // StorageClassExpressOnezone is a StorageClass enum value + StorageClassExpressOnezone = "EXPRESS_ONEZONE" +) + +// StorageClass_Values returns all elements of the StorageClass enum +func StorageClass_Values() []string { + return []string{ + StorageClassStandard, + StorageClassReducedRedundancy, + StorageClassStandardIa, + StorageClassOnezoneIa, + StorageClassIntelligentTiering, + StorageClassGlacier, + 
StorageClassDeepArchive, + StorageClassOutposts, + StorageClassGlacierIr, + StorageClassSnow, + StorageClassExpressOnezone, + } +} + +const ( + // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value + StorageClassAnalysisSchemaVersionV1 = "V_1" +) + +// StorageClassAnalysisSchemaVersion_Values returns all elements of the StorageClassAnalysisSchemaVersion enum +func StorageClassAnalysisSchemaVersion_Values() []string { + return []string{ + StorageClassAnalysisSchemaVersionV1, + } +} + +const ( + // TaggingDirectiveCopy is a TaggingDirective enum value + TaggingDirectiveCopy = "COPY" + + // TaggingDirectiveReplace is a TaggingDirective enum value + TaggingDirectiveReplace = "REPLACE" +) + +// TaggingDirective_Values returns all elements of the TaggingDirective enum +func TaggingDirective_Values() []string { + return []string{ + TaggingDirectiveCopy, + TaggingDirectiveReplace, + } +} + +const ( + // TierStandard is a Tier enum value + TierStandard = "Standard" + + // TierBulk is a Tier enum value + TierBulk = "Bulk" + + // TierExpedited is a Tier enum value + TierExpedited = "Expedited" +) + +// Tier_Values returns all elements of the Tier enum +func Tier_Values() []string { + return []string{ + TierStandard, + TierBulk, + TierExpedited, + } +} + +const ( + // TransitionStorageClassGlacier is a TransitionStorageClass enum value + TransitionStorageClassGlacier = "GLACIER" + + // TransitionStorageClassStandardIa is a TransitionStorageClass enum value + TransitionStorageClassStandardIa = "STANDARD_IA" + + // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value + TransitionStorageClassOnezoneIa = "ONEZONE_IA" + + // TransitionStorageClassIntelligentTiering is a TransitionStorageClass enum value + TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // TransitionStorageClassDeepArchive is a TransitionStorageClass enum value + TransitionStorageClassDeepArchive = "DEEP_ARCHIVE" + + // TransitionStorageClassGlacierIr is a TransitionStorageClass enum value + TransitionStorageClassGlacierIr = "GLACIER_IR" +) + +// TransitionStorageClass_Values returns all elements of the TransitionStorageClass enum +func TransitionStorageClass_Values() []string { + return []string{ + TransitionStorageClassGlacier, + TransitionStorageClassStandardIa, + TransitionStorageClassOnezoneIa, + TransitionStorageClassIntelligentTiering, + TransitionStorageClassDeepArchive, + TransitionStorageClassGlacierIr, + } +} + +const ( + // TypeCanonicalUser is a Type enum value + TypeCanonicalUser = "CanonicalUser" + + // TypeAmazonCustomerByEmail is a Type enum value + TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" + + // TypeGroup is a Type enum value + TypeGroup = "Group" +) + +// Type_Values returns all elements of the Type enum +func Type_Values() []string { + return []string{ + TypeCanonicalUser, + TypeAmazonCustomerByEmail, + TypeGroup, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go new file mode 100644 index 000000000..407f06b6e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go @@ -0,0 +1,202 @@ +package s3 + +import ( + "bytes" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + contentMD5Header = "Content-Md5" + contentSha256Header = "X-Amz-Content-Sha256" + amzTeHeader 
= "X-Amz-Te" + amzTxEncodingHeader = "X-Amz-Transfer-Encoding" + + appendMD5TxEncoding = "append-md5" +) + +// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the +// request. If the body is not seekable or S3DisableContentMD5Validation set +// this handler will be ignored. +func computeBodyHashes(r *request.Request) { + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + if r.IsPresigned() { + return + } + if r.Error != nil || !aws.IsReaderSeekable(r.Body) { + return + } + + var md5Hash, sha256Hash hash.Hash + hashers := make([]io.Writer, 0, 2) + + // Determine upfront which hashes can be set without overriding user + // provide header data. + if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 { + md5Hash = md5.New() + hashers = append(hashers, md5Hash) + } + + if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 { + sha256Hash = sha256.New() + hashers = append(hashers, sha256Hash) + } + + // Create the destination writer based on the hashes that are not already + // provided by the user. + var dst io.Writer + switch len(hashers) { + case 0: + return + case 1: + dst = hashers[0] + default: + dst = io.MultiWriter(hashers...) + } + + if _, err := aws.CopySeekableBody(dst, r.Body); err != nil { + r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err) + return + } + + // For the hashes created, set the associated headers that the user did not + // already provide. + if md5Hash != nil { + sum := make([]byte, md5.Size) + encoded := make([]byte, md5Base64EncLen) + + base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0])) + r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)} + } + + if sha256Hash != nil { + encoded := make([]byte, sha256HexEncLen) + sum := make([]byte, sha256.Size) + + hex.Encode(encoded, sha256Hash.Sum(sum[0:0])) + r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)} + } +} + +const ( + md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen + sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen +) + +// Adds the x-amz-te: append_md5 header to the request. This requests the service +// responds with a trailing MD5 checksum. +// +// Will not ask for append MD5 if disabled, the request is presigned or, +// or the API operation does not support content MD5 validation. +func askForTxEncodingAppendMD5(r *request.Request) { + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + if r.IsPresigned() { + return + } + r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding) +} + +func useMD5ValidationReader(r *request.Request) { + if r.Error != nil { + return + } + + if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding { + return + } + + var bodyReader *io.ReadCloser + var contentLen int64 + switch tv := r.Data.(type) { + case *GetObjectOutput: + bodyReader = &tv.Body + contentLen = aws.Int64Value(tv.ContentLength) + // Update ContentLength hiden the trailing MD5 checksum. 
+ tv.ContentLength = aws.Int64(contentLen - md5.Size) + tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range")) + default: + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("%s: %s header received on unsupported API, %s", + amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name, + ), nil) + return + } + + if contentLen < md5.Size { + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("invalid Content-Length %d for %s %s", + contentLen, appendMD5TxEncoding, amzTxEncodingHeader, + ), nil) + return + } + + // Wrap and swap the response body reader with the validation reader. + *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size) +} + +type md5ValidationReader struct { + rawReader io.ReadCloser + payload io.Reader + hash hash.Hash + + payloadLen int64 + read int64 +} + +func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader { + h := md5.New() + return &md5ValidationReader{ + rawReader: reader, + payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h), + hash: h, + payloadLen: payloadLen, + } +} + +func (v *md5ValidationReader) Read(p []byte) (n int, err error) { + n, err = v.payload.Read(p) + if err != nil && err != io.EOF { + return n, err + } + + v.read += int64(n) + + if err == io.EOF { + if v.read != v.payloadLen { + return n, io.ErrUnexpectedEOF + } + expectSum := make([]byte, md5.Size) + actualSum := make([]byte, md5.Size) + if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil { + return n, sumReadErr + } + actualSum = v.hash.Sum(actualSum[0:0]) + if !bytes.Equal(expectSum, actualSum) { + return n, awserr.New("InvalidChecksum", + fmt.Sprintf("expected MD5 checksum %s, got %s", + hex.EncodeToString(expectSum), + hex.EncodeToString(actualSum), + ), + nil) + } + } + + return n, err +} + +func (v *md5ValidationReader) Close() error { + return v.rawReader.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go new file mode 100644 index 000000000..20828387e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -0,0 +1,107 @@ +package s3 + +import ( + "io/ioutil" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) + +// NormalizeBucketLocation is a utility function which will update the +// passed in value to always be a region ID. Generally this would be used +// with GetBucketLocation API operation. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +func NormalizeBucketLocation(loc string) string { + switch loc { + case "": + loc = "us-east-1" + case "EU": + loc = "eu-west-1" + } + + return loc +} + +// NormalizeBucketLocationHandler is a request handler which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. 
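A quick sketch of the mapping NormalizeBucketLocation performs; the handler documented here is shown in use just below.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Legacy GetBucketLocation values map onto region IDs.
	fmt.Println(s3.NormalizeBucketLocation(""))          // us-east-1
	fmt.Println(s3.NormalizeBucketLocation("EU"))        // eu-west-1
	fmt.Println(s3.NormalizeBucketLocation("us-west-2")) // unchanged
}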
+// +// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }) +// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +// err := req.Send() +var NormalizeBucketLocationHandler = request.NamedHandler{ + Name: "awssdk.s3.NormalizeBucketLocation", + Fn: func(req *request.Request) { + if req.Error != nil { + return + } + + out := req.Data.(*GetBucketLocationOutput) + loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)) + out.LocationConstraint = aws.String(loc) + }, +} + +// WithNormalizeBucketLocation is a request option which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// result, err := svc.GetBucketLocationWithContext(ctx, +// &s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }, +// s3.WithNormalizeBucketLocation, +// ) +func WithNormalizeBucketLocation(r *request.Request) { + r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +} + +func buildGetBucketLocation(r *request.Request) { + if r.DataFilled() { + out := r.Data.(*GetBucketLocationOutput) + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "failed reading response body", err) + return + } + + match := reBucketLocation.FindSubmatch(b) + if len(match) > 1 { + loc := string(match[1]) + out.LocationConstraint = aws.String(loc) + } + } +} + +func populateLocationConstraint(r *request.Request) { + if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { + in := r.Params.(*CreateBucketInput) + if in.CreateBucketConfiguration == nil { + r.Params = awsutil.CopyOf(r.Params) + in = r.Params.(*CreateBucketInput) + in.CreateBucketConfiguration = &CreateBucketConfiguration{ + LocationConstraint: r.Config.Region, + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go new file mode 100644 index 000000000..229606b70 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -0,0 +1,89 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/internal/s3shared/s3err" +) + +func init() { + initClient = defaultInitClientFn + initRequest = defaultInitRequestFn +} + +func defaultInitClientFn(c *client.Client) { + if c.Config.UseDualStackEndpoint == endpoints.DualStackEndpointStateUnset { + if aws.BoolValue(c.Config.UseDualStack) { + c.Config.UseDualStackEndpoint = endpoints.DualStackEndpointStateEnabled + } else { + c.Config.UseDualStackEndpoint = endpoints.DualStackEndpointStateDisabled + } + } + + // Support building custom endpoints based on config + c.Handlers.Build.PushFront(endpointHandler) + + // Require SSL when using SSE keys + c.Handlers.Validate.PushBack(validateSSERequiresSSL) + c.Handlers.Build.PushBack(computeSSEKeyMD5) + c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5) + + // S3 uses custom error unmarshaling logic + c.Handlers.UnmarshalError.Clear() + c.Handlers.UnmarshalError.PushBack(unmarshalError) + 
c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler())
+}
+
+func defaultInitRequestFn(r *request.Request) {
+	// Add request handlers for specific platforms.
+	// e.g. 100-continue support for PUT requests using Go 1.6
+	platformRequestHandlers(r)
+
+	switch r.Operation.Name {
+	case opGetBucketLocation:
+		// GetBucketLocation has custom parsing logic
+		r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
+	case opCreateBucket:
+		// Auto-populate LocationConstraint with current region
+		r.Handlers.Validate.PushFront(populateLocationConstraint)
+	case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
+		r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarshalError)
+		r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler())
+	case opPutObject, opUploadPart:
+		r.Handlers.Build.PushBack(computeBodyHashes)
+		// Disabled until #1837 root issue is resolved.
+		// case opGetObject:
+		// 	r.Handlers.Build.PushBack(askForTxEncodingAppendMD5)
+		// 	r.Handlers.Unmarshal.PushBack(useMD5ValidationReader)
+	case opWriteGetObjectResponse:
+		r.Handlers.Build.PushFront(buildWriteGetObjectResponseEndpoint)
+	}
+}
+
+// bucketGetter is an accessor interface to grab the "Bucket" field from
+// an S3 type.
+type bucketGetter interface {
+	getBucket() string
+}
+
+// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey"
+// field from an S3 type.
+type sseCustomerKeyGetter interface {
+	getSSECustomerKey() string
+}
+
+// copySourceSSECustomerKeyGetter is an accessor interface to grab the
+// "CopySourceSSECustomerKey" field from an S3 type.
+type copySourceSSECustomerKeyGetter interface {
+	getCopySourceSSECustomerKey() string
+}
+
+// endpointARNGetter is an accessor interface to grab the
+// field corresponding to an endpoint ARN input.
+type endpointARNGetter interface {
+	getEndpointARN() (arn.Resource, error)
+	hasEndpointARN() bool
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
new file mode 100644
index 000000000..c148f757e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
@@ -0,0 +1,26 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package s3 provides the client and types for making API
+// requests to Amazon Simple Storage Service.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service.
+//
+// See s3 package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
+//
+// # Using the Client
+//
+// To contact Amazon Simple Storage Service with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon Simple Storage Service client S3 for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
new file mode 100644
index 000000000..2e8244f8f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
@@ -0,0 +1,109 @@
+// # Upload Managers
+//
+// The s3manager package's Uploader provides concurrent upload of content to S3
+// by taking advantage of S3's Multipart APIs. The Uploader also supports both
+// io.Reader for streaming uploads, and will also take advantage of io.ReadSeeker
+// for optimizations if the Body satisfies that type. Once the Uploader instance
+// is created you can call Upload concurrently from multiple goroutines safely.
+//
+//	// The session the S3 Uploader will use
+//	sess := session.Must(session.NewSession())
+//
+//	// Create an uploader with the session and default options
+//	uploader := s3manager.NewUploader(sess)
+//
+//	f, err := os.Open(filename)
+//	if err != nil {
+//	    return fmt.Errorf("failed to open file %q, %v", filename, err)
+//	}
+//
+//	// Upload the file to S3.
+//	result, err := uploader.Upload(&s3manager.UploadInput{
+//	    Bucket: aws.String(myBucket),
+//	    Key:    aws.String(myString),
+//	    Body:   f,
+//	})
+//	if err != nil {
+//	    return fmt.Errorf("failed to upload file, %v", err)
+//	}
+//	fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
+//
+// See the s3manager package's Uploader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
+//
+// # Download Manager
+//
+// The s3manager package's Downloader provides concurrent downloading of Objects
+// from S3. The Downloader will write S3 Object content to an io.WriterAt.
+// Once the Downloader instance is created you can call Download concurrently from
+// multiple goroutines safely.
+//
+//	// The session the S3 Downloader will use
+//	sess := session.Must(session.NewSession())
+//
+//	// Create a downloader with the session and default options
+//	downloader := s3manager.NewDownloader(sess)
+//
+//	// Create a file to write the S3 Object contents to.
+//	f, err := os.Create(filename)
+//	if err != nil {
+//	    return fmt.Errorf("failed to create file %q, %v", filename, err)
+//	}
+//
+//	// Write the contents of S3 Object to the file
+//	n, err := downloader.Download(f, &s3.GetObjectInput{
+//	    Bucket: aws.String(myBucket),
+//	    Key:    aws.String(myString),
+//	})
+//	if err != nil {
+//	    return fmt.Errorf("failed to download file, %v", err)
+//	}
+//	fmt.Printf("file downloaded, %d bytes\n", n)
+//
+// See the s3manager package's Downloader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
+//
+// # Automatic URI cleaning
+//
+// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname)
+// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct
+// used by the service client.
+//
+//	svc := s3.New(sess, &aws.Config{
+//	    DisableRestProtocolURICleaning: aws.Bool(true),
+//	})
+//	out, err := svc.GetObject(&s3.GetObjectInput{
+//	    Bucket: aws.String("bucketname"),
+//	    Key:    aws.String("//foo//bar//moo"),
+//	})
+//
+// # Get Bucket Region
+//
+// GetBucketRegion will attempt to get the region for a bucket using a region
+// hint to determine which AWS partition to perform the query on. Use this utility
+// to determine the region a bucket is in.
+//
+//	sess := session.Must(session.NewSession())
+//
+//	bucket := "my-bucket"
+//	region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
+//	if err != nil {
+//	    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
+//	        fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
+//	    }
+//	    return err
+//	}
+//	fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+// See the s3manager package's GetBucketRegion function documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
+//
+// # S3 Crypto Client
+//
+// The s3crypto package provides the tools to upload and download encrypted
+// content from S3. The Encryption and Decryption clients can be used concurrently
+// once the client is created.
+//
+// See the s3crypto package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go
new file mode 100644
index 000000000..71b438692
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go
@@ -0,0 +1,298 @@
+package s3
+
+import (
+	"fmt"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+	"net/url"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	awsarn "github.com/aws/aws-sdk-go/aws/arn"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/s3shared"
+	"github.com/aws/aws-sdk-go/internal/s3shared/arn"
+)
+
+const (
+	s3Namespace              = "s3"
+	s3AccessPointNamespace   = "s3-accesspoint"
+	s3ObjectsLambdaNamespace = "s3-object-lambda"
+	s3OutpostsNamespace      = "s3-outposts"
+)
+
+// Used by shapes with members decorated as endpoint ARN.
+func parseEndpointARN(v string) (arn.Resource, error) {
+	return arn.ParseResource(v, accessPointResourceParser)
+}
+
+func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) {
+	resParts := arn.SplitResource(a.Resource)
+	switch resParts[0] {
+	case "accesspoint":
+		switch a.Service {
+		case s3Namespace:
+			return arn.ParseAccessPointResource(a, resParts[1:])
+		case s3ObjectsLambdaNamespace:
+			return parseS3ObjectLambdaAccessPointResource(a, resParts)
+		default:
+			return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s or %s", s3Namespace, s3ObjectsLambdaNamespace)}
+		}
+	case "outpost":
+		if a.Service != "s3-outposts" {
+			return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"}
+		}
+		return parseOutpostAccessPointResource(a, resParts[1:])
+	default:
+		return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"}
+	}
+}
+
+// parseOutpostAccessPointResource attempts to parse the ARN's resource as an
+// outpost access-point resource.
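The s3shared/arn helpers above are internal to the SDK; from user code, a rough equivalent of the initial split leans on the public aws/arn package. The ARN here is a made-up example:

package main

import (
	"fmt"

	awsarn "github.com/aws/aws-sdk-go/aws/arn"
)

func main() {
	a, err := awsarn.Parse("arn:aws:s3:us-west-2:123456789012:accesspoint/myendpoint")
	if err != nil {
		panic(err)
	}

	// Service and Resource are what accessPointResourceParser dispatches on.
	fmt.Println(a.Service)  // s3
	fmt.Println(a.Resource) // accesspoint/myendpoint
}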
+// +// Supported Outpost AccessPoint ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint +func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) { + // outpost accesspoint arn is only valid if service is s3-outposts + if a.Service != "s3-outposts" { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} + } + + if len(resParts) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + if len(resParts) < 3 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ + ARN: a, Reason: "access-point resource not set in Outpost ARN", + } + } + + resID := strings.TrimSpace(resParts[0]) + if len(resID) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + var outpostAccessPointARN = arn.OutpostAccessPointARN{} + switch resParts[1] { + case "accesspoint": + accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:]) + if err != nil { + return arn.OutpostAccessPointARN{}, err + } + // set access-point arn + outpostAccessPointARN.AccessPointARN = accessPointARN + default: + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"} + } + + // set outpost id + outpostAccessPointARN.OutpostID = resID + return outpostAccessPointARN, nil +} + +func parseS3ObjectLambdaAccessPointResource(a awsarn.ARN, resParts []string) (arn.S3ObjectLambdaAccessPointARN, error) { + if a.Service != s3ObjectsLambdaNamespace { + return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3ObjectsLambdaNamespace)} + } + + accessPointARN, err := arn.ParseAccessPointResource(a, resParts[1:]) + if err != nil { + return arn.S3ObjectLambdaAccessPointARN{}, err + } + + if len(accessPointARN.Region) == 0 { + return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("%s region not set", s3ObjectsLambdaNamespace)} + } + + return arn.S3ObjectLambdaAccessPointARN{ + AccessPointARN: accessPointARN, + }, nil +} + +func endpointHandler(req *request.Request) { + endpoint, ok := req.Params.(endpointARNGetter) + if !ok || !endpoint.hasEndpointARN() { + updateBucketEndpointFromParams(req) + return + } + + resource, err := endpoint.getEndpointARN() + if err != nil { + req.Error = s3shared.NewInvalidARNError(nil, err) + return + } + + resReq := s3shared.ResourceRequest{ + Resource: resource, + Request: req, + } + + if len(resReq.Request.ClientInfo.PartitionID) != 0 && resReq.IsCrossPartition() { + req.Error = s3shared.NewClientPartitionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() { + req.Error = s3shared.NewClientRegionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + switch tv := resource.(type) { + case arn.AccessPointARN: + err = updateRequestAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + case arn.S3ObjectLambdaAccessPointARN: + err = updateRequestS3ObjectLambdaAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + case arn.OutpostAccessPointARN: + // 
outposts does not support FIPS regions
+		if req.Config.UseFIPSEndpoint == endpoints.FIPSEndpointStateEnabled {
+			req.Error = s3shared.NewFIPSConfigurationError(resource, req.ClientInfo.PartitionID,
+				aws.StringValue(req.Config.Region), nil)
+			return
+		}
+
+		err = updateRequestOutpostAccessPointEndpoint(req, tv)
+		if err != nil {
+			req.Error = err
+		}
+	default:
+		req.Error = s3shared.NewInvalidARNError(resource, nil)
+	}
+}
+
+func updateBucketEndpointFromParams(r *request.Request) {
+	bucket, ok := bucketNameFromReqParams(r.Params)
+	if !ok {
+		// Ignore operation requests if the bucket name was not provided;
+		// if this is an input validation error the validation handler
+		// will report it.
+		return
+	}
+	updateEndpointForS3Config(r, bucket)
+}
+
+func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error {
+	// Accelerate not supported
+	if aws.BoolValue(req.Config.S3UseAccelerate) {
+		return s3shared.NewClientConfiguredForAccelerateError(accessPoint,
+			req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
+	}
+
+	// Ignore the disable host prefix for access points
+	req.Config.DisableEndpointHostPrefix = aws.Bool(false)
+
+	if err := accessPointEndpointBuilder(accessPoint).build(req); err != nil {
+		return err
+	}
+
+	removeBucketFromPath(req.HTTPRequest.URL)
+
+	return nil
+}
+
+func updateRequestS3ObjectLambdaAccessPointEndpoint(req *request.Request, accessPoint arn.S3ObjectLambdaAccessPointARN) error {
+	// DualStack not supported
+	if isUseDualStackEndpoint(req) {
+		return s3shared.NewClientConfiguredForDualStackError(accessPoint,
+			req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
+	}
+
+	// Accelerate not supported
+	if aws.BoolValue(req.Config.S3UseAccelerate) {
+		return s3shared.NewClientConfiguredForAccelerateError(accessPoint,
+			req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
+	}
+
+	// Ignore the disable host prefix for access points
+	req.Config.DisableEndpointHostPrefix = aws.Bool(false)
+
+	if err := s3ObjectLambdaAccessPointEndpointBuilder(accessPoint).build(req); err != nil {
+		return err
+	}
+
+	removeBucketFromPath(req.HTTPRequest.URL)
+
+	return nil
+}
+
+func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint arn.OutpostAccessPointARN) error {
+	// Accelerate not supported
+	if aws.BoolValue(req.Config.S3UseAccelerate) {
+		return s3shared.NewClientConfiguredForAccelerateError(accessPoint,
+			req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
+	}
+
+	// DualStack not supported
+	if isUseDualStackEndpoint(req) {
+		return s3shared.NewClientConfiguredForDualStackError(accessPoint,
+			req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
+	}
+
+	// Ignore the disable host prefix for access points
+	req.Config.DisableEndpointHostPrefix = aws.Bool(false)
+
+	if err := outpostAccessPointEndpointBuilder(accessPoint).build(req); err != nil {
+		return err
+	}
+
+	removeBucketFromPath(req.HTTPRequest.URL)
+	return nil
+}
+
+func removeBucketFromPath(u *url.URL) {
+	u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
+	if u.Path == "" {
+		u.Path = "/"
+	}
+}
+
+func buildWriteGetObjectResponseEndpoint(req *request.Request) {
+	// DualStack not supported
+	if isUseDualStackEndpoint(req) {
+		req.Error = awserr.New("ConfigurationError", "client configured for dualstack but not supported for operation", nil)
+		return
+	}
+
+	// Accelerate not supported
+	if aws.BoolValue(req.Config.S3UseAccelerate) {
+		req.Error =
awserr.New("ConfigurationError", "client configured for accelerate but not supported for operation", nil) + return + } + + signingName := s3ObjectsLambdaNamespace + signingRegion := req.ClientInfo.SigningRegion + + if !hasCustomEndpoint(req) { + endpoint, err := resolveRegionalEndpoint(req, aws.StringValue(req.Config.Region), req.ClientInfo.ResolvedRegion, EndpointsID) + if err != nil { + req.Error = awserr.New(request.ErrCodeSerialization, "failed to resolve endpoint", err) + return + } + signingRegion = endpoint.SigningRegion + + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + req.Error = err + return + } + updateS3HostPrefixForS3ObjectLambda(req) + } + + redirectSigner(req, signingName, signingRegion) +} + +func isUseDualStackEndpoint(req *request.Request) bool { + if req.Config.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset { + return req.Config.UseDualStackEndpoint == endpoints.DualStackEndpointStateEnabled + } + return aws.BoolValue(req.Config.UseDualStack) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go new file mode 100644 index 000000000..7ae18ef54 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go @@ -0,0 +1,239 @@ +package s3 + +import ( + "net/url" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + accessPointPrefixLabel = "accesspoint" + accountIDPrefixLabel = "accountID" + accessPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}." + + outpostPrefixLabel = "outpost" + outpostAccessPointPrefixTemplate = accessPointPrefixTemplate + "{" + outpostPrefixLabel + "}." +) + +// hasCustomEndpoint returns true if endpoint is a custom endpoint +func hasCustomEndpoint(r *request.Request) bool { + return len(aws.StringValue(r.Config.Endpoint)) > 0 +} + +// accessPointEndpointBuilder represents the endpoint builder for access point arn +type accessPointEndpointBuilder arn.AccessPointARN + +// build builds the endpoint for corresponding access point arn +// +// For building an endpoint from access point arn, format used is: +// - Access point endpoint format : {accesspointName}-{accountId}.s3-accesspoint.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.s3-accesspoint.us-west-2.amazonaws.com +// +// Access Point Endpoint requests are signed using "s3" as signing name. 
+func (a accessPointEndpointBuilder) build(req *request.Request) error { + resolveService := arn.AccessPointARN(a).Service + resolveRegion := arn.AccessPointARN(a).Region + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", resolveService) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(arn.AccessPointARN(a), + req.ClientInfo.PartitionID, resolveRegion, err) + } + + endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) + + if !hasCustomEndpoint(req) { + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + // dual stack provided by endpoint resolver + updateS3HostForS3AccessPoint(req) + } + + protocol.HostPrefixBuilder{ + Prefix: accessPointPrefixTemplate, + LabelsFn: a.hostPrefixLabelValues, + }.Build(req) + + // signer redirection + redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(arn.AccessPointARN(a), err) + } + + return nil +} + +func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName, + accountIDPrefixLabel: arn.AccessPointARN(a).AccountID, + } +} + +// s3ObjectLambdaAccessPointEndpointBuilder represents the endpoint builder for an s3 object lambda access point arn +type s3ObjectLambdaAccessPointEndpointBuilder arn.S3ObjectLambdaAccessPointARN + +// build builds the endpoint for corresponding access point arn +// +// For building an endpoint from access point arn, format used is: +// - Access point endpoint format : {accesspointName}-{accountId}.s3-object-lambda.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.s3-object-lambda.us-west-2.amazonaws.com +// +// Access Point Endpoint requests are signed using "s3-object-lambda" as signing name. +func (a s3ObjectLambdaAccessPointEndpointBuilder) build(req *request.Request) error { + resolveRegion := arn.S3ObjectLambdaAccessPointARN(a).Region + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", EndpointsID) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(arn.S3ObjectLambdaAccessPointARN(a), + req.ClientInfo.PartitionID, resolveRegion, err) + } + + endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) + + endpoint.SigningName = s3ObjectsLambdaNamespace + + if !hasCustomEndpoint(req) { + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + updateS3HostPrefixForS3ObjectLambda(req) + } + + protocol.HostPrefixBuilder{ + Prefix: accessPointPrefixTemplate, + LabelsFn: a.hostPrefixLabelValues, + }.Build(req) + + // signer redirection + redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(arn.S3ObjectLambdaAccessPointARN(a), err) + } + + return nil +} + +func (a s3ObjectLambdaAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccessPointName, + accountIDPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccountID, + } +} + +// outpostAccessPointEndpointBuilder represents the Endpoint builder for outpost access point arn. 
+type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN
+
+// build builds an endpoint corresponding to the outpost access point arn.
+//
+// For building an endpoint from outpost access point arn, format used is:
+// - Outpost access point endpoint format : {accesspointName}-{accountId}.{outpostId}.s3-outposts.{region}.{dnsSuffix}
+// - example : myaccesspoint-012345678901.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com
+//
+// Outpost AccessPoint Endpoint requests are signed using "s3-outposts" as signing name.
+func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error {
+	resolveRegion := o.Region
+	resolveService := o.Service
+
+	endpointsID := resolveService
+	if resolveService == s3OutpostsNamespace {
+		endpointsID = "s3"
+	}
+
+	endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", endpointsID)
+	if err != nil {
+		return s3shared.NewFailedToResolveEndpointError(o,
+			req.ClientInfo.PartitionID, resolveRegion, err)
+	}
+
+	endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL))
+
+	if !hasCustomEndpoint(req) {
+		if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
+			return err
+		}
+		updateHostPrefix(req, endpointsID, resolveService)
+	}
+
+	protocol.HostPrefixBuilder{
+		Prefix:   outpostAccessPointPrefixTemplate,
+		LabelsFn: o.hostPrefixLabelValues,
+	}.Build(req)
+
+	// set the signing region, name to resolved names from ARN
+	redirectSigner(req, resolveService, resolveRegion)
+
+	err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
+	if err != nil {
+		return s3shared.NewInvalidARNError(o, err)
+	}
+
+	return nil
+}
+
+func (o outpostAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
+	return map[string]string{
+		accessPointPrefixLabel: o.AccessPointName,
+		accountIDPrefixLabel:   o.AccountID,
+		outpostPrefixLabel:     o.OutpostID,
+	}
+}
+
+func resolveRegionalEndpoint(r *request.Request, region, resolvedRegion, endpointsID string) (endpoints.ResolvedEndpoint, error) {
+	return r.Config.EndpointResolver.EndpointFor(endpointsID, region, func(opts *endpoints.Options) {
+		opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL)
+		opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack)
+		opts.UseDualStackEndpoint = r.Config.UseDualStackEndpoint
+		opts.UseFIPSEndpoint = r.Config.UseFIPSEndpoint
+		opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint
+		opts.ResolvedRegion = resolvedRegion
+		opts.Logger = r.Config.Logger
+		opts.LogDeprecated = r.Config.LogLevel.Matches(aws.LogDebugWithDeprecated)
+	})
+}
+
+func updateRequestEndpoint(r *request.Request, endpoint string) (err error) {
+	r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath)
+	if err != nil {
+		return awserr.New(request.ErrCodeSerialization,
+			"failed to parse endpoint URL", err)
+	}
+
+	return nil
+}
+
+// redirectSigner sets signing name, signing region for a request
+func redirectSigner(req *request.Request, signingName string, signingRegion string) {
+	req.ClientInfo.SigningName = signingName
+	req.ClientInfo.SigningRegion = signingRegion
+}
+
+func updateS3HostForS3AccessPoint(req *request.Request) {
+	updateHostPrefix(req, "s3", s3AccessPointNamespace)
+}
+
+func updateS3HostPrefixForS3ObjectLambda(req *request.Request) {
+	updateHostPrefix(req, "s3", s3ObjectsLambdaNamespace)
+}
+
+func updateHostPrefix(req *request.Request, oldEndpointPrefix, newEndpointPrefix string) {
+	host := req.HTTPRequest.URL.Host
+	if strings.HasPrefix(host, oldEndpointPrefix) {
+		// replace the service host label oldEndpointPrefix with newEndpointPrefix
+		req.HTTPRequest.URL.Host = newEndpointPrefix + host[len(oldEndpointPrefix):]
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
new file mode 100644
index 000000000..8a67333ab
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
@@ -0,0 +1,69 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+const (
+
+	// ErrCodeBucketAlreadyExists for service response error code
+	// "BucketAlreadyExists".
+	//
+	// The requested bucket name is not available. The bucket namespace is shared
+	// by all users of the system. Select a different name and try again.
+	ErrCodeBucketAlreadyExists = "BucketAlreadyExists"
+
+	// ErrCodeBucketAlreadyOwnedByYou for service response error code
+	// "BucketAlreadyOwnedByYou".
+	//
+	// The bucket you tried to create already exists, and you own it. Amazon S3
+	// returns this error in all Amazon Web Services Regions except in the North
+	// Virginia Region. For legacy compatibility, if you re-create an existing bucket
+	// that you already own in the North Virginia Region, Amazon S3 returns 200
+	// OK and resets the bucket access control lists (ACLs).
+	ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
+
+	// ErrCodeInvalidObjectState for service response error code
+	// "InvalidObjectState".
+	//
+	// Object is archived and inaccessible until restored.
+	//
+	// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval
+	// storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering
+	// Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier,
+	// before you can retrieve the object you must first restore a copy using RestoreObject
+	// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
+	// Otherwise, this operation returns an InvalidObjectState error. For information
+	// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
+	// in the Amazon S3 User Guide.
+	ErrCodeInvalidObjectState = "InvalidObjectState"
+
+	// ErrCodeNoSuchBucket for service response error code
+	// "NoSuchBucket".
+	//
+	// The specified bucket does not exist.
+	ErrCodeNoSuchBucket = "NoSuchBucket"
+
+	// ErrCodeNoSuchKey for service response error code
+	// "NoSuchKey".
+	//
+	// The specified key does not exist.
+	ErrCodeNoSuchKey = "NoSuchKey"
+
+	// ErrCodeNoSuchUpload for service response error code
+	// "NoSuchUpload".
+	//
+	// The specified multipart upload does not exist.
+	ErrCodeNoSuchUpload = "NoSuchUpload"
+
+	// ErrCodeObjectAlreadyInActiveTierError for service response error code
+	// "ObjectAlreadyInActiveTierError".
+	//
+	// This action is not allowed against this storage tier.
+	ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"
+
+	// ErrCodeObjectNotInActiveTierError for service response error code
+	// "ObjectNotInActiveTierError".
+	//
+	// The source object of the COPY action is not in the active tier and is only
+	// stored in Amazon S3 Glacier.
+	ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
new file mode 100644
index 000000000..81cdec1ae
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -0,0 +1,136 @@
+package s3
+
+import (
+	"fmt"
+	"net/url"
+	"regexp"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// an operationBlacklist is a list of operation names with which a
+// request handler should not be executed.
+type operationBlacklist []string
+
+// Continue will return true if the Request's operation name is not
+// in the blacklist. False otherwise.
+func (b operationBlacklist) Continue(r *request.Request) bool {
+	for i := 0; i < len(b); i++ {
+		if b[i] == r.Operation.Name {
+			return false
+		}
+	}
+	return true
+}
+
+var accelerateOpBlacklist = operationBlacklist{
+	opListBuckets, opCreateBucket, opDeleteBucket,
+}
+
+// Automatically add the bucket name to the endpoint domain
+// if possible. This style of bucket is valid for all bucket names which are
+// DNS compatible and do not contain "."
+func updateEndpointForS3Config(r *request.Request, bucketName string) {
+	forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
+	accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
+
+	if accelerate && accelerateOpBlacklist.Continue(r) {
+		if forceHostStyle {
+			if r.Config.Logger != nil {
+				r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
+			}
+		}
+		updateEndpointForAccelerate(r, bucketName)
+	} else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
+		updateEndpointForHostStyle(r, bucketName)
+	}
+}
+
+func updateEndpointForHostStyle(r *request.Request, bucketName string) {
+	if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) {
+		// bucket name must be valid to put into the host
+		return
+	}
+
+	moveBucketToHost(r.HTTPRequest.URL, bucketName)
+}
+
+var (
+	accelElem = []byte("s3-accelerate.dualstack.")
+)
+
+func updateEndpointForAccelerate(r *request.Request, bucketName string) {
+	if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) {
+		r.Error = awserr.New("InvalidParameterException",
+			fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucketName),
+			nil)
+		return
+	}
+
+	parts := strings.Split(r.HTTPRequest.URL.Host, ".")
+	if len(parts) < 3 {
+		r.Error = awserr.New("InvalidParameterException",
+			fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s",
+				r.HTTPRequest.URL.Host), nil)
+		return
+	}
+
+	if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
+		parts[0] = "s3-accelerate"
+	}
+	for i := 1; i+1 < len(parts); i++ {
+		if parts[i] == aws.StringValue(r.Config.Region) {
+			parts = append(parts[:i], parts[i+1:]...)
+			break
+		}
+	}
+
+	r.HTTPRequest.URL.Host = strings.Join(parts, ".")
+
+	moveBucketToHost(r.HTTPRequest.URL, bucketName)
+}
+
+// Attempts to retrieve the bucket name from the request input parameters.
+// If no bucket is found, or the field is empty "", false will be returned.
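A common pattern around the error codes vendored above is branching on the Code of an awserr.Error. A minimal sketch with placeholder bucket and key names:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("missing-key"),    // placeholder
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case s3.ErrCodeNoSuchBucket:
			fmt.Println("bucket does not exist")
		case s3.ErrCodeNoSuchKey:
			fmt.Println("key does not exist")
		default:
			log.Fatal(aerr)
		}
	}
}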
+func bucketNameFromReqParams(params interface{}) (string, bool) { + if iface, ok := params.(bucketGetter); ok { + b := iface.getBucket() + return b, len(b) > 0 + } + + return "", false +} + +// hostCompatibleBucketName returns true if the request should +// put the bucket in the host. This is false if S3ForcePathStyle is +// explicitly set or if the bucket is not DNS compatible. +func hostCompatibleBucketName(u *url.URL, bucket string) bool { + // Bucket might be DNS compatible but dots in the hostname will fail + // certificate validation, so do not use host-style. + if u.Scheme == "https" && strings.Contains(bucket, ".") { + return false + } + + // if the bucket is DNS compatible + return dnsCompatibleBucketName(bucket) +} + +var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) +var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) + +// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. +// Buckets created outside of the classic region MUST be DNS compatible. +func dnsCompatibleBucketName(bucket string) bool { + return reDomain.MatchString(bucket) && + !reIPAddress.MatchString(bucket) && + !strings.Contains(bucket, "..") +} + +// moveBucketToHost moves the bucket name from the URI path to URL host. +func moveBucketToHost(u *url.URL, bucket string) { + u.Host = bucket + "." + u.Host + removeBucketFromPath(u) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go new file mode 100644 index 000000000..308b7d473 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go @@ -0,0 +1,9 @@ +//go:build !go1.6 +// +build !go1.6 + +package s3 + +import "github.com/aws/aws-sdk-go/aws/request" + +func platformRequestHandlers(r *request.Request) { +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go new file mode 100644 index 000000000..70feffab7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go @@ -0,0 +1,29 @@ +//go:build go1.6 +// +build go1.6 + +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +func platformRequestHandlers(r *request.Request) { + if r.Operation.HTTPMethod == "PUT" { + // 100-Continue should only be used on put requests. + r.Handlers.Sign.PushBack(add100Continue) + } +} + +func add100Continue(r *request.Request) { + if aws.BoolValue(r.Config.S3Disable100Continue) { + return + } + if r.HTTPRequest.ContentLength < 1024*1024*2 { + // Ignore requests smaller than 2MB. This helps prevent delaying + // requests unnecessarily. + return + } + + r.HTTPRequest.Header.Set("Expect", "100-continue") +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go new file mode 100644 index 000000000..d13b46170 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go @@ -0,0 +1,490 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package s3iface provides an interface to enable mocking the Amazon Simple Storage Service service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. 
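The 100-continue behavior above boils down to a single predicate; a small sketch, not an SDK API, with the thresholds taken from add100Continue:

package main

import "fmt"

// wantsContinue mirrors platformRequestHandlers/add100Continue: PUT requests
// only, not explicitly disabled, and bodies of at least 2MB.
func wantsContinue(method string, contentLength int64, disabled bool) bool {
	return method == "PUT" && !disabled && contentLength >= 1024*1024*2
}

func main() {
	fmt.Println(wantsContinue("PUT", 5<<20, false)) // true
	fmt.Println(wantsContinue("PUT", 1<<20, false)) // false: under 2MB
	fmt.Println(wantsContinue("GET", 5<<20, false)) // false: not a PUT
}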
+package s3iface
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/s3"
+)
+
+// S3API provides an interface to enable mocking the
+// s3.S3 service client's API operations,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client's calls easier.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+//	// myFunc uses an SDK service client to make a request to
+//	// Amazon Simple Storage Service.
+//	func myFunc(svc s3iface.S3API) bool {
+//	    // Make svc.AbortMultipartUpload request
+//	}
+//
+//	func main() {
+//	    sess := session.New()
+//	    svc := s3.New(sess)
+//
+//	    myFunc(svc)
+//	}
+//
+// In your _test.go file:
+//
+//	// Define a mock struct to be used in your unit tests of myFunc.
+//	type mockS3Client struct {
+//	    s3iface.S3API
+//	}
+//	func (m *mockS3Client) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) {
+//	    // mock response/functionality
+//	}
+//
+//	func TestMyFunc(t *testing.T) {
+//	    // Setup Test
+//	    mockSvc := &mockS3Client{}
+//
+//	    myFunc(mockSvc)
+//
+//	    // Verify myFunc's functionality
+//	}
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or using
+// tooling to generate mocks to satisfy the interfaces.
+type S3API interface {
+	AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error)
+	AbortMultipartUploadWithContext(aws.Context, *s3.AbortMultipartUploadInput, ...request.Option) (*s3.AbortMultipartUploadOutput, error)
+	AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput)
+
+	CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
+	CompleteMultipartUploadWithContext(aws.Context, *s3.CompleteMultipartUploadInput, ...request.Option) (*s3.CompleteMultipartUploadOutput, error)
+	CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput)
+
+	CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
+	CopyObjectWithContext(aws.Context, *s3.CopyObjectInput, ...request.Option) (*s3.CopyObjectOutput, error)
+	CopyObjectRequest(*s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput)
+
+	CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error)
+	CreateBucketWithContext(aws.Context, *s3.CreateBucketInput, ...request.Option) (*s3.CreateBucketOutput, error)
+	CreateBucketRequest(*s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput)
+
+	CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
+	CreateMultipartUploadWithContext(aws.Context, *s3.CreateMultipartUploadInput, ...request.Option) (*s3.CreateMultipartUploadOutput, error)
+	CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput)
+
+	CreateSession(*s3.CreateSessionInput) (*s3.CreateSessionOutput, error)
+	CreateSessionWithContext(aws.Context, *s3.CreateSessionInput, ...request.Option) (*s3.CreateSessionOutput, error)
+	CreateSessionRequest(*s3.CreateSessionInput)
(*request.Request, *s3.CreateSessionOutput) + + DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) + DeleteBucketWithContext(aws.Context, *s3.DeleteBucketInput, ...request.Option) (*s3.DeleteBucketOutput, error) + DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput) + + DeleteBucketAnalyticsConfiguration(*s3.DeleteBucketAnalyticsConfigurationInput) (*s3.DeleteBucketAnalyticsConfigurationOutput, error) + DeleteBucketAnalyticsConfigurationWithContext(aws.Context, *s3.DeleteBucketAnalyticsConfigurationInput, ...request.Option) (*s3.DeleteBucketAnalyticsConfigurationOutput, error) + DeleteBucketAnalyticsConfigurationRequest(*s3.DeleteBucketAnalyticsConfigurationInput) (*request.Request, *s3.DeleteBucketAnalyticsConfigurationOutput) + + DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error) + DeleteBucketCorsWithContext(aws.Context, *s3.DeleteBucketCorsInput, ...request.Option) (*s3.DeleteBucketCorsOutput, error) + DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput) + + DeleteBucketEncryption(*s3.DeleteBucketEncryptionInput) (*s3.DeleteBucketEncryptionOutput, error) + DeleteBucketEncryptionWithContext(aws.Context, *s3.DeleteBucketEncryptionInput, ...request.Option) (*s3.DeleteBucketEncryptionOutput, error) + DeleteBucketEncryptionRequest(*s3.DeleteBucketEncryptionInput) (*request.Request, *s3.DeleteBucketEncryptionOutput) + + DeleteBucketIntelligentTieringConfiguration(*s3.DeleteBucketIntelligentTieringConfigurationInput) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) + DeleteBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.DeleteBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) + DeleteBucketIntelligentTieringConfigurationRequest(*s3.DeleteBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.DeleteBucketIntelligentTieringConfigurationOutput) + + DeleteBucketInventoryConfiguration(*s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error) + DeleteBucketInventoryConfigurationWithContext(aws.Context, *s3.DeleteBucketInventoryConfigurationInput, ...request.Option) (*s3.DeleteBucketInventoryConfigurationOutput, error) + DeleteBucketInventoryConfigurationRequest(*s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput) + + DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error) + DeleteBucketLifecycleWithContext(aws.Context, *s3.DeleteBucketLifecycleInput, ...request.Option) (*s3.DeleteBucketLifecycleOutput, error) + DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput) + + DeleteBucketMetricsConfiguration(*s3.DeleteBucketMetricsConfigurationInput) (*s3.DeleteBucketMetricsConfigurationOutput, error) + DeleteBucketMetricsConfigurationWithContext(aws.Context, *s3.DeleteBucketMetricsConfigurationInput, ...request.Option) (*s3.DeleteBucketMetricsConfigurationOutput, error) + DeleteBucketMetricsConfigurationRequest(*s3.DeleteBucketMetricsConfigurationInput) (*request.Request, *s3.DeleteBucketMetricsConfigurationOutput) + + DeleteBucketOwnershipControls(*s3.DeleteBucketOwnershipControlsInput) (*s3.DeleteBucketOwnershipControlsOutput, error) + DeleteBucketOwnershipControlsWithContext(aws.Context, *s3.DeleteBucketOwnershipControlsInput, ...request.Option) 
(*s3.DeleteBucketOwnershipControlsOutput, error) + DeleteBucketOwnershipControlsRequest(*s3.DeleteBucketOwnershipControlsInput) (*request.Request, *s3.DeleteBucketOwnershipControlsOutput) + + DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error) + DeleteBucketPolicyWithContext(aws.Context, *s3.DeleteBucketPolicyInput, ...request.Option) (*s3.DeleteBucketPolicyOutput, error) + DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput) + + DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error) + DeleteBucketReplicationWithContext(aws.Context, *s3.DeleteBucketReplicationInput, ...request.Option) (*s3.DeleteBucketReplicationOutput, error) + DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput) + + DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error) + DeleteBucketTaggingWithContext(aws.Context, *s3.DeleteBucketTaggingInput, ...request.Option) (*s3.DeleteBucketTaggingOutput, error) + DeleteBucketTaggingRequest(*s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput) + + DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error) + DeleteBucketWebsiteWithContext(aws.Context, *s3.DeleteBucketWebsiteInput, ...request.Option) (*s3.DeleteBucketWebsiteOutput, error) + DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput) + + DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) + DeleteObjectWithContext(aws.Context, *s3.DeleteObjectInput, ...request.Option) (*s3.DeleteObjectOutput, error) + DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput) + + DeleteObjectTagging(*s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error) + DeleteObjectTaggingWithContext(aws.Context, *s3.DeleteObjectTaggingInput, ...request.Option) (*s3.DeleteObjectTaggingOutput, error) + DeleteObjectTaggingRequest(*s3.DeleteObjectTaggingInput) (*request.Request, *s3.DeleteObjectTaggingOutput) + + DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) + DeleteObjectsWithContext(aws.Context, *s3.DeleteObjectsInput, ...request.Option) (*s3.DeleteObjectsOutput, error) + DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput) + + DeletePublicAccessBlock(*s3.DeletePublicAccessBlockInput) (*s3.DeletePublicAccessBlockOutput, error) + DeletePublicAccessBlockWithContext(aws.Context, *s3.DeletePublicAccessBlockInput, ...request.Option) (*s3.DeletePublicAccessBlockOutput, error) + DeletePublicAccessBlockRequest(*s3.DeletePublicAccessBlockInput) (*request.Request, *s3.DeletePublicAccessBlockOutput) + + GetBucketAccelerateConfiguration(*s3.GetBucketAccelerateConfigurationInput) (*s3.GetBucketAccelerateConfigurationOutput, error) + GetBucketAccelerateConfigurationWithContext(aws.Context, *s3.GetBucketAccelerateConfigurationInput, ...request.Option) (*s3.GetBucketAccelerateConfigurationOutput, error) + GetBucketAccelerateConfigurationRequest(*s3.GetBucketAccelerateConfigurationInput) (*request.Request, *s3.GetBucketAccelerateConfigurationOutput) + + GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error) + GetBucketAclWithContext(aws.Context, *s3.GetBucketAclInput, ...request.Option) (*s3.GetBucketAclOutput, error) + GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput) + + 
GetBucketAnalyticsConfiguration(*s3.GetBucketAnalyticsConfigurationInput) (*s3.GetBucketAnalyticsConfigurationOutput, error) + GetBucketAnalyticsConfigurationWithContext(aws.Context, *s3.GetBucketAnalyticsConfigurationInput, ...request.Option) (*s3.GetBucketAnalyticsConfigurationOutput, error) + GetBucketAnalyticsConfigurationRequest(*s3.GetBucketAnalyticsConfigurationInput) (*request.Request, *s3.GetBucketAnalyticsConfigurationOutput) + + GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error) + GetBucketCorsWithContext(aws.Context, *s3.GetBucketCorsInput, ...request.Option) (*s3.GetBucketCorsOutput, error) + GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput) + + GetBucketEncryption(*s3.GetBucketEncryptionInput) (*s3.GetBucketEncryptionOutput, error) + GetBucketEncryptionWithContext(aws.Context, *s3.GetBucketEncryptionInput, ...request.Option) (*s3.GetBucketEncryptionOutput, error) + GetBucketEncryptionRequest(*s3.GetBucketEncryptionInput) (*request.Request, *s3.GetBucketEncryptionOutput) + + GetBucketIntelligentTieringConfiguration(*s3.GetBucketIntelligentTieringConfigurationInput) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) + GetBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.GetBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) + GetBucketIntelligentTieringConfigurationRequest(*s3.GetBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.GetBucketIntelligentTieringConfigurationOutput) + + GetBucketInventoryConfiguration(*s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error) + GetBucketInventoryConfigurationWithContext(aws.Context, *s3.GetBucketInventoryConfigurationInput, ...request.Option) (*s3.GetBucketInventoryConfigurationOutput, error) + GetBucketInventoryConfigurationRequest(*s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput) + + GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error) + GetBucketLifecycleWithContext(aws.Context, *s3.GetBucketLifecycleInput, ...request.Option) (*s3.GetBucketLifecycleOutput, error) + GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput) + + GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error) + GetBucketLifecycleConfigurationWithContext(aws.Context, *s3.GetBucketLifecycleConfigurationInput, ...request.Option) (*s3.GetBucketLifecycleConfigurationOutput, error) + GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput) + + GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error) + GetBucketLocationWithContext(aws.Context, *s3.GetBucketLocationInput, ...request.Option) (*s3.GetBucketLocationOutput, error) + GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput) + + GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error) + GetBucketLoggingWithContext(aws.Context, *s3.GetBucketLoggingInput, ...request.Option) (*s3.GetBucketLoggingOutput, error) + GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput) + + GetBucketMetricsConfiguration(*s3.GetBucketMetricsConfigurationInput) (*s3.GetBucketMetricsConfigurationOutput, error) + 
GetBucketMetricsConfigurationWithContext(aws.Context, *s3.GetBucketMetricsConfigurationInput, ...request.Option) (*s3.GetBucketMetricsConfigurationOutput, error) + GetBucketMetricsConfigurationRequest(*s3.GetBucketMetricsConfigurationInput) (*request.Request, *s3.GetBucketMetricsConfigurationOutput) + + GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error) + GetBucketNotificationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfigurationDeprecated, error) + GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated) + + GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error) + GetBucketNotificationConfigurationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfiguration, error) + GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration) + + GetBucketOwnershipControls(*s3.GetBucketOwnershipControlsInput) (*s3.GetBucketOwnershipControlsOutput, error) + GetBucketOwnershipControlsWithContext(aws.Context, *s3.GetBucketOwnershipControlsInput, ...request.Option) (*s3.GetBucketOwnershipControlsOutput, error) + GetBucketOwnershipControlsRequest(*s3.GetBucketOwnershipControlsInput) (*request.Request, *s3.GetBucketOwnershipControlsOutput) + + GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error) + GetBucketPolicyWithContext(aws.Context, *s3.GetBucketPolicyInput, ...request.Option) (*s3.GetBucketPolicyOutput, error) + GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput) + + GetBucketPolicyStatus(*s3.GetBucketPolicyStatusInput) (*s3.GetBucketPolicyStatusOutput, error) + GetBucketPolicyStatusWithContext(aws.Context, *s3.GetBucketPolicyStatusInput, ...request.Option) (*s3.GetBucketPolicyStatusOutput, error) + GetBucketPolicyStatusRequest(*s3.GetBucketPolicyStatusInput) (*request.Request, *s3.GetBucketPolicyStatusOutput) + + GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error) + GetBucketReplicationWithContext(aws.Context, *s3.GetBucketReplicationInput, ...request.Option) (*s3.GetBucketReplicationOutput, error) + GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput) + + GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error) + GetBucketRequestPaymentWithContext(aws.Context, *s3.GetBucketRequestPaymentInput, ...request.Option) (*s3.GetBucketRequestPaymentOutput, error) + GetBucketRequestPaymentRequest(*s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput) + + GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error) + GetBucketTaggingWithContext(aws.Context, *s3.GetBucketTaggingInput, ...request.Option) (*s3.GetBucketTaggingOutput, error) + GetBucketTaggingRequest(*s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput) + + GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error) + GetBucketVersioningWithContext(aws.Context, *s3.GetBucketVersioningInput, ...request.Option) (*s3.GetBucketVersioningOutput, error) + GetBucketVersioningRequest(*s3.GetBucketVersioningInput) (*request.Request, 
*s3.GetBucketVersioningOutput) + + GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error) + GetBucketWebsiteWithContext(aws.Context, *s3.GetBucketWebsiteInput, ...request.Option) (*s3.GetBucketWebsiteOutput, error) + GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput) + + GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error) + GetObjectWithContext(aws.Context, *s3.GetObjectInput, ...request.Option) (*s3.GetObjectOutput, error) + GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput) + + GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) + GetObjectAclWithContext(aws.Context, *s3.GetObjectAclInput, ...request.Option) (*s3.GetObjectAclOutput, error) + GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput) + + GetObjectAttributes(*s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) + GetObjectAttributesWithContext(aws.Context, *s3.GetObjectAttributesInput, ...request.Option) (*s3.GetObjectAttributesOutput, error) + GetObjectAttributesRequest(*s3.GetObjectAttributesInput) (*request.Request, *s3.GetObjectAttributesOutput) + + GetObjectLegalHold(*s3.GetObjectLegalHoldInput) (*s3.GetObjectLegalHoldOutput, error) + GetObjectLegalHoldWithContext(aws.Context, *s3.GetObjectLegalHoldInput, ...request.Option) (*s3.GetObjectLegalHoldOutput, error) + GetObjectLegalHoldRequest(*s3.GetObjectLegalHoldInput) (*request.Request, *s3.GetObjectLegalHoldOutput) + + GetObjectLockConfiguration(*s3.GetObjectLockConfigurationInput) (*s3.GetObjectLockConfigurationOutput, error) + GetObjectLockConfigurationWithContext(aws.Context, *s3.GetObjectLockConfigurationInput, ...request.Option) (*s3.GetObjectLockConfigurationOutput, error) + GetObjectLockConfigurationRequest(*s3.GetObjectLockConfigurationInput) (*request.Request, *s3.GetObjectLockConfigurationOutput) + + GetObjectRetention(*s3.GetObjectRetentionInput) (*s3.GetObjectRetentionOutput, error) + GetObjectRetentionWithContext(aws.Context, *s3.GetObjectRetentionInput, ...request.Option) (*s3.GetObjectRetentionOutput, error) + GetObjectRetentionRequest(*s3.GetObjectRetentionInput) (*request.Request, *s3.GetObjectRetentionOutput) + + GetObjectTagging(*s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error) + GetObjectTaggingWithContext(aws.Context, *s3.GetObjectTaggingInput, ...request.Option) (*s3.GetObjectTaggingOutput, error) + GetObjectTaggingRequest(*s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput) + + GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error) + GetObjectTorrentWithContext(aws.Context, *s3.GetObjectTorrentInput, ...request.Option) (*s3.GetObjectTorrentOutput, error) + GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput) + + GetPublicAccessBlock(*s3.GetPublicAccessBlockInput) (*s3.GetPublicAccessBlockOutput, error) + GetPublicAccessBlockWithContext(aws.Context, *s3.GetPublicAccessBlockInput, ...request.Option) (*s3.GetPublicAccessBlockOutput, error) + GetPublicAccessBlockRequest(*s3.GetPublicAccessBlockInput) (*request.Request, *s3.GetPublicAccessBlockOutput) + + HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error) + HeadBucketWithContext(aws.Context, *s3.HeadBucketInput, ...request.Option) (*s3.HeadBucketOutput, error) + HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput) + + HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error) + 
HeadObjectWithContext(aws.Context, *s3.HeadObjectInput, ...request.Option) (*s3.HeadObjectOutput, error) + HeadObjectRequest(*s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput) + + ListBucketAnalyticsConfigurations(*s3.ListBucketAnalyticsConfigurationsInput) (*s3.ListBucketAnalyticsConfigurationsOutput, error) + ListBucketAnalyticsConfigurationsWithContext(aws.Context, *s3.ListBucketAnalyticsConfigurationsInput, ...request.Option) (*s3.ListBucketAnalyticsConfigurationsOutput, error) + ListBucketAnalyticsConfigurationsRequest(*s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput) + + ListBucketIntelligentTieringConfigurations(*s3.ListBucketIntelligentTieringConfigurationsInput) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error) + ListBucketIntelligentTieringConfigurationsWithContext(aws.Context, *s3.ListBucketIntelligentTieringConfigurationsInput, ...request.Option) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error) + ListBucketIntelligentTieringConfigurationsRequest(*s3.ListBucketIntelligentTieringConfigurationsInput) (*request.Request, *s3.ListBucketIntelligentTieringConfigurationsOutput) + + ListBucketInventoryConfigurations(*s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error) + ListBucketInventoryConfigurationsWithContext(aws.Context, *s3.ListBucketInventoryConfigurationsInput, ...request.Option) (*s3.ListBucketInventoryConfigurationsOutput, error) + ListBucketInventoryConfigurationsRequest(*s3.ListBucketInventoryConfigurationsInput) (*request.Request, *s3.ListBucketInventoryConfigurationsOutput) + + ListBucketMetricsConfigurations(*s3.ListBucketMetricsConfigurationsInput) (*s3.ListBucketMetricsConfigurationsOutput, error) + ListBucketMetricsConfigurationsWithContext(aws.Context, *s3.ListBucketMetricsConfigurationsInput, ...request.Option) (*s3.ListBucketMetricsConfigurationsOutput, error) + ListBucketMetricsConfigurationsRequest(*s3.ListBucketMetricsConfigurationsInput) (*request.Request, *s3.ListBucketMetricsConfigurationsOutput) + + ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error) + ListBucketsWithContext(aws.Context, *s3.ListBucketsInput, ...request.Option) (*s3.ListBucketsOutput, error) + ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput) + + ListDirectoryBuckets(*s3.ListDirectoryBucketsInput) (*s3.ListDirectoryBucketsOutput, error) + ListDirectoryBucketsWithContext(aws.Context, *s3.ListDirectoryBucketsInput, ...request.Option) (*s3.ListDirectoryBucketsOutput, error) + ListDirectoryBucketsRequest(*s3.ListDirectoryBucketsInput) (*request.Request, *s3.ListDirectoryBucketsOutput) + + ListDirectoryBucketsPages(*s3.ListDirectoryBucketsInput, func(*s3.ListDirectoryBucketsOutput, bool) bool) error + ListDirectoryBucketsPagesWithContext(aws.Context, *s3.ListDirectoryBucketsInput, func(*s3.ListDirectoryBucketsOutput, bool) bool, ...request.Option) error + + ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) + ListMultipartUploadsWithContext(aws.Context, *s3.ListMultipartUploadsInput, ...request.Option) (*s3.ListMultipartUploadsOutput, error) + ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput) + + ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool) error + ListMultipartUploadsPagesWithContext(aws.Context, *s3.ListMultipartUploadsInput, 
func(*s3.ListMultipartUploadsOutput, bool) bool, ...request.Option) error + + ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) + ListObjectVersionsWithContext(aws.Context, *s3.ListObjectVersionsInput, ...request.Option) (*s3.ListObjectVersionsOutput, error) + ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput) + + ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error + ListObjectVersionsPagesWithContext(aws.Context, *s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool, ...request.Option) error + + ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error) + ListObjectsWithContext(aws.Context, *s3.ListObjectsInput, ...request.Option) (*s3.ListObjectsOutput, error) + ListObjectsRequest(*s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput) + + ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool) error + ListObjectsPagesWithContext(aws.Context, *s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool, ...request.Option) error + + ListObjectsV2(*s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) + ListObjectsV2WithContext(aws.Context, *s3.ListObjectsV2Input, ...request.Option) (*s3.ListObjectsV2Output, error) + ListObjectsV2Request(*s3.ListObjectsV2Input) (*request.Request, *s3.ListObjectsV2Output) + + ListObjectsV2Pages(*s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool) error + ListObjectsV2PagesWithContext(aws.Context, *s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool, ...request.Option) error + + ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error) + ListPartsWithContext(aws.Context, *s3.ListPartsInput, ...request.Option) (*s3.ListPartsOutput, error) + ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput) + + ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error + ListPartsPagesWithContext(aws.Context, *s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool, ...request.Option) error + + PutBucketAccelerateConfiguration(*s3.PutBucketAccelerateConfigurationInput) (*s3.PutBucketAccelerateConfigurationOutput, error) + PutBucketAccelerateConfigurationWithContext(aws.Context, *s3.PutBucketAccelerateConfigurationInput, ...request.Option) (*s3.PutBucketAccelerateConfigurationOutput, error) + PutBucketAccelerateConfigurationRequest(*s3.PutBucketAccelerateConfigurationInput) (*request.Request, *s3.PutBucketAccelerateConfigurationOutput) + + PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error) + PutBucketAclWithContext(aws.Context, *s3.PutBucketAclInput, ...request.Option) (*s3.PutBucketAclOutput, error) + PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput) + + PutBucketAnalyticsConfiguration(*s3.PutBucketAnalyticsConfigurationInput) (*s3.PutBucketAnalyticsConfigurationOutput, error) + PutBucketAnalyticsConfigurationWithContext(aws.Context, *s3.PutBucketAnalyticsConfigurationInput, ...request.Option) (*s3.PutBucketAnalyticsConfigurationOutput, error) + PutBucketAnalyticsConfigurationRequest(*s3.PutBucketAnalyticsConfigurationInput) (*request.Request, *s3.PutBucketAnalyticsConfigurationOutput) + + PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error) + PutBucketCorsWithContext(aws.Context, *s3.PutBucketCorsInput, ...request.Option) (*s3.PutBucketCorsOutput, error) + PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, 
*s3.PutBucketCorsOutput) + + PutBucketEncryption(*s3.PutBucketEncryptionInput) (*s3.PutBucketEncryptionOutput, error) + PutBucketEncryptionWithContext(aws.Context, *s3.PutBucketEncryptionInput, ...request.Option) (*s3.PutBucketEncryptionOutput, error) + PutBucketEncryptionRequest(*s3.PutBucketEncryptionInput) (*request.Request, *s3.PutBucketEncryptionOutput) + + PutBucketIntelligentTieringConfiguration(*s3.PutBucketIntelligentTieringConfigurationInput) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) + PutBucketIntelligentTieringConfigurationWithContext(aws.Context, *s3.PutBucketIntelligentTieringConfigurationInput, ...request.Option) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) + PutBucketIntelligentTieringConfigurationRequest(*s3.PutBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.PutBucketIntelligentTieringConfigurationOutput) + + PutBucketInventoryConfiguration(*s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error) + PutBucketInventoryConfigurationWithContext(aws.Context, *s3.PutBucketInventoryConfigurationInput, ...request.Option) (*s3.PutBucketInventoryConfigurationOutput, error) + PutBucketInventoryConfigurationRequest(*s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput) + + PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error) + PutBucketLifecycleWithContext(aws.Context, *s3.PutBucketLifecycleInput, ...request.Option) (*s3.PutBucketLifecycleOutput, error) + PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput) + + PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error) + PutBucketLifecycleConfigurationWithContext(aws.Context, *s3.PutBucketLifecycleConfigurationInput, ...request.Option) (*s3.PutBucketLifecycleConfigurationOutput, error) + PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput) + + PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error) + PutBucketLoggingWithContext(aws.Context, *s3.PutBucketLoggingInput, ...request.Option) (*s3.PutBucketLoggingOutput, error) + PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput) + + PutBucketMetricsConfiguration(*s3.PutBucketMetricsConfigurationInput) (*s3.PutBucketMetricsConfigurationOutput, error) + PutBucketMetricsConfigurationWithContext(aws.Context, *s3.PutBucketMetricsConfigurationInput, ...request.Option) (*s3.PutBucketMetricsConfigurationOutput, error) + PutBucketMetricsConfigurationRequest(*s3.PutBucketMetricsConfigurationInput) (*request.Request, *s3.PutBucketMetricsConfigurationOutput) + + PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error) + PutBucketNotificationWithContext(aws.Context, *s3.PutBucketNotificationInput, ...request.Option) (*s3.PutBucketNotificationOutput, error) + PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput) + + PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error) + PutBucketNotificationConfigurationWithContext(aws.Context, *s3.PutBucketNotificationConfigurationInput, ...request.Option) (*s3.PutBucketNotificationConfigurationOutput, error) + 
PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput) + + PutBucketOwnershipControls(*s3.PutBucketOwnershipControlsInput) (*s3.PutBucketOwnershipControlsOutput, error) + PutBucketOwnershipControlsWithContext(aws.Context, *s3.PutBucketOwnershipControlsInput, ...request.Option) (*s3.PutBucketOwnershipControlsOutput, error) + PutBucketOwnershipControlsRequest(*s3.PutBucketOwnershipControlsInput) (*request.Request, *s3.PutBucketOwnershipControlsOutput) + + PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error) + PutBucketPolicyWithContext(aws.Context, *s3.PutBucketPolicyInput, ...request.Option) (*s3.PutBucketPolicyOutput, error) + PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput) + + PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error) + PutBucketReplicationWithContext(aws.Context, *s3.PutBucketReplicationInput, ...request.Option) (*s3.PutBucketReplicationOutput, error) + PutBucketReplicationRequest(*s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput) + + PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error) + PutBucketRequestPaymentWithContext(aws.Context, *s3.PutBucketRequestPaymentInput, ...request.Option) (*s3.PutBucketRequestPaymentOutput, error) + PutBucketRequestPaymentRequest(*s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput) + + PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error) + PutBucketTaggingWithContext(aws.Context, *s3.PutBucketTaggingInput, ...request.Option) (*s3.PutBucketTaggingOutput, error) + PutBucketTaggingRequest(*s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput) + + PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error) + PutBucketVersioningWithContext(aws.Context, *s3.PutBucketVersioningInput, ...request.Option) (*s3.PutBucketVersioningOutput, error) + PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput) + + PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error) + PutBucketWebsiteWithContext(aws.Context, *s3.PutBucketWebsiteInput, ...request.Option) (*s3.PutBucketWebsiteOutput, error) + PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput) + + PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error) + PutObjectWithContext(aws.Context, *s3.PutObjectInput, ...request.Option) (*s3.PutObjectOutput, error) + PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput) + + PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error) + PutObjectAclWithContext(aws.Context, *s3.PutObjectAclInput, ...request.Option) (*s3.PutObjectAclOutput, error) + PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput) + + PutObjectLegalHold(*s3.PutObjectLegalHoldInput) (*s3.PutObjectLegalHoldOutput, error) + PutObjectLegalHoldWithContext(aws.Context, *s3.PutObjectLegalHoldInput, ...request.Option) (*s3.PutObjectLegalHoldOutput, error) + PutObjectLegalHoldRequest(*s3.PutObjectLegalHoldInput) (*request.Request, *s3.PutObjectLegalHoldOutput) + + PutObjectLockConfiguration(*s3.PutObjectLockConfigurationInput) (*s3.PutObjectLockConfigurationOutput, error) + PutObjectLockConfigurationWithContext(aws.Context, 
*s3.PutObjectLockConfigurationInput, ...request.Option) (*s3.PutObjectLockConfigurationOutput, error) + PutObjectLockConfigurationRequest(*s3.PutObjectLockConfigurationInput) (*request.Request, *s3.PutObjectLockConfigurationOutput) + + PutObjectRetention(*s3.PutObjectRetentionInput) (*s3.PutObjectRetentionOutput, error) + PutObjectRetentionWithContext(aws.Context, *s3.PutObjectRetentionInput, ...request.Option) (*s3.PutObjectRetentionOutput, error) + PutObjectRetentionRequest(*s3.PutObjectRetentionInput) (*request.Request, *s3.PutObjectRetentionOutput) + + PutObjectTagging(*s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error) + PutObjectTaggingWithContext(aws.Context, *s3.PutObjectTaggingInput, ...request.Option) (*s3.PutObjectTaggingOutput, error) + PutObjectTaggingRequest(*s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput) + + PutPublicAccessBlock(*s3.PutPublicAccessBlockInput) (*s3.PutPublicAccessBlockOutput, error) + PutPublicAccessBlockWithContext(aws.Context, *s3.PutPublicAccessBlockInput, ...request.Option) (*s3.PutPublicAccessBlockOutput, error) + PutPublicAccessBlockRequest(*s3.PutPublicAccessBlockInput) (*request.Request, *s3.PutPublicAccessBlockOutput) + + RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) + RestoreObjectWithContext(aws.Context, *s3.RestoreObjectInput, ...request.Option) (*s3.RestoreObjectOutput, error) + RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput) + + SelectObjectContent(*s3.SelectObjectContentInput) (*s3.SelectObjectContentOutput, error) + SelectObjectContentWithContext(aws.Context, *s3.SelectObjectContentInput, ...request.Option) (*s3.SelectObjectContentOutput, error) + SelectObjectContentRequest(*s3.SelectObjectContentInput) (*request.Request, *s3.SelectObjectContentOutput) + + UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error) + UploadPartWithContext(aws.Context, *s3.UploadPartInput, ...request.Option) (*s3.UploadPartOutput, error) + UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput) + + UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) + UploadPartCopyWithContext(aws.Context, *s3.UploadPartCopyInput, ...request.Option) (*s3.UploadPartCopyOutput, error) + UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput) + + WriteGetObjectResponse(*s3.WriteGetObjectResponseInput) (*s3.WriteGetObjectResponseOutput, error) + WriteGetObjectResponseWithContext(aws.Context, *s3.WriteGetObjectResponseInput, ...request.Option) (*s3.WriteGetObjectResponseOutput, error) + WriteGetObjectResponseRequest(*s3.WriteGetObjectResponseInput) (*request.Request, *s3.WriteGetObjectResponseOutput) + + WaitUntilBucketExists(*s3.HeadBucketInput) error + WaitUntilBucketExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error + + WaitUntilBucketNotExists(*s3.HeadBucketInput) error + WaitUntilBucketNotExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error + + WaitUntilObjectExists(*s3.HeadObjectInput) error + WaitUntilObjectExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error + + WaitUntilObjectNotExists(*s3.HeadObjectInput) error + WaitUntilObjectNotExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error +} + +var _ S3API = (*s3.S3)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/arn.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/arn.go new file mode 
100644
index 000000000..f0a7f9bfc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/arn.go
@@ -0,0 +1,23 @@
+package s3manager
+
+import (
+	"fmt"
+	"github.com/aws/aws-sdk-go/aws/arn"
+)
+
+func validateSupportedARNType(bucket string) error {
+	if !arn.IsARN(bucket) {
+		return nil
+	}
+
+	parsedARN, err := arn.Parse(bucket)
+	if err != nil {
+		return err
+	}
+
+	if parsedARN.Service == "s3-object-lambda" {
+		return fmt.Errorf("manager does not support s3-object-lambda service ARNs")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go
new file mode 100644
index 000000000..0804bca65
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go
@@ -0,0 +1,532 @@
+package s3manager
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+const (
+	// DefaultBatchSize is the batch size used when constructing a batch
+	// delete client. It controls how many objects are deleted per
+	// DeleteObjects call.
+	DefaultBatchSize = 100
+)
+
+// BatchError will contain the key and bucket of the object that failed to
+// either upload or download.
+type BatchError struct {
+	Errors  Errors
+	code    string
+	message string
+}
+
+// Errors is a typed alias for a slice of errors to satisfy the error
+// interface.
+type Errors []Error
+
+func (errs Errors) Error() string {
+	buf := bytes.NewBuffer(nil)
+	for i, err := range errs {
+		buf.WriteString(err.Error())
+		if i+1 < len(errs) {
+			buf.WriteString("\n")
+		}
+	}
+	return buf.String()
+}
+
+// Error will contain the original error, bucket, and key of the operation that failed
+// during batch operations.
+type Error struct {
+	OrigErr error
+	Bucket  *string
+	Key     *string
+}
+
+func newError(err error, bucket, key *string) Error {
+	return Error{
+		err,
+		bucket,
+		key,
+	}
+}
+
+func (err *Error) Error() string {
+	origErr := ""
+	if err.OrigErr != nil {
+		origErr = ":\n" + err.OrigErr.Error()
+	}
+	return fmt.Sprintf("failed to perform batch operation on %q to %q%s",
+		aws.StringValue(err.Key),
+		aws.StringValue(err.Bucket),
+		origErr,
+	)
+}
+
+// NewBatchError will return a BatchError that satisfies the awserr.Error interface.
+func NewBatchError(code, message string, err []Error) awserr.Error {
+	return &BatchError{
+		Errors:  err,
+		code:    code,
+		message: message,
+	}
+}
+
+// Code will return the code associated with the batch error.
+func (err *BatchError) Code() string {
+	return err.code
+}
+
+// Message will return the message associated with the batch error.
+func (err *BatchError) Message() string {
+	return err.message
+}
+
+func (err *BatchError) Error() string {
+	return awserr.SprintError(err.Code(), err.Message(), "", err.Errors)
+}
+
+// OrigErr will return the original error; for batched operations this is the
+// underlying Errors slice rather than a single wrapped error.
+func (err *BatchError) OrigErr() error {
+	return err.Errors
+}
+
+// BatchDeleteIterator is an interface that uses the scanner pattern to
+// iterate through what needs to be deleted.
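+//
+// A minimal custom iterator over a fixed set of keys might look like the
+// following sketch (sliceDeleteIterator and its fields are hypothetical
+// names, not part of this package):
+//
+//	type sliceDeleteIterator struct {
+//	    bucket string
+//	    keys   []string
+//	    pos    int
+//	}
+//
+//	func (it *sliceDeleteIterator) Next() bool { it.pos++; return it.pos <= len(it.keys) }
+//	func (it *sliceDeleteIterator) Err() error { return nil }
+//	func (it *sliceDeleteIterator) DeleteObject() s3manager.BatchDeleteObject {
+//	    return s3manager.BatchDeleteObject{Object: &s3.DeleteObjectInput{
+//	        Bucket: aws.String(it.bucket),
+//	        Key:    aws.String(it.keys[it.pos-1]),
+//	    }}
+//	}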
+type BatchDeleteIterator interface {
+	Next() bool
+	Err() error
+	DeleteObject() BatchDeleteObject
+}
+
+// DeleteListIterator is an alternative iterator for the BatchDelete client. It
+// pages through a list of objects and deletes them.
+//
+// Example:
+//
+//	iter := s3manager.NewDeleteListIterator(svc, &s3.ListObjectsInput{
+//	    Bucket:  aws.String("bucket"),
+//	    MaxKeys: aws.Int64(5),
+//	})
+//
+//	batcher := s3manager.NewBatchDeleteWithClient(svc)
+//	if err := batcher.Delete(aws.BackgroundContext(), iter); err != nil {
+//	    return err
+//	}
+type DeleteListIterator struct {
+	Bucket    *string
+	Paginator request.Pagination
+	objects   []*s3.Object
+}
+
+// NewDeleteListIterator will return a new DeleteListIterator.
+func NewDeleteListIterator(svc s3iface.S3API, input *s3.ListObjectsInput, opts ...func(*DeleteListIterator)) BatchDeleteIterator {
+	iter := &DeleteListIterator{
+		Bucket: input.Bucket,
+		Paginator: request.Pagination{
+			NewRequest: func() (*request.Request, error) {
+				var inCpy *s3.ListObjectsInput
+				if input != nil {
+					tmp := *input
+					inCpy = &tmp
+				}
+				req, _ := svc.ListObjectsRequest(inCpy)
+				return req, nil
+			},
+		},
+	}
+
+	for _, opt := range opts {
+		opt(iter)
+	}
+	return iter
+}
+
+// Next will use the S3API client to iterate through a list of objects.
+func (iter *DeleteListIterator) Next() bool {
+	if len(iter.objects) > 0 {
+		iter.objects = iter.objects[1:]
+	}
+
+	if len(iter.objects) == 0 && iter.Paginator.Next() {
+		iter.objects = iter.Paginator.Page().(*s3.ListObjectsOutput).Contents
+	}
+
+	return len(iter.objects) > 0
+}
+
+// Err will return the last known error from Next.
+func (iter *DeleteListIterator) Err() error {
+	return iter.Paginator.Err()
+}
+
+// DeleteObject will return the current object to be deleted.
+func (iter *DeleteListIterator) DeleteObject() BatchDeleteObject {
+	return BatchDeleteObject{
+		Object: &s3.DeleteObjectInput{
+			Bucket: iter.Bucket,
+			Key:    iter.objects[0].Key,
+		},
+	}
+}
+
+// BatchDelete will use the s3 package's service client to perform a batch
+// delete.
+type BatchDelete struct {
+	Client    s3iface.S3API
+	BatchSize int
+}
+
+// NewBatchDeleteWithClient will return a new delete client that can delete a batched amount of
+// objects.
+//
+// Example:
+//
+//	batcher := s3manager.NewBatchDeleteWithClient(client)
+//
+//	objects := []s3manager.BatchDeleteObject{
+//	    {
+//	        Object: &s3.DeleteObjectInput{
+//	            Key:    aws.String("key"),
+//	            Bucket: aws.String("bucket"),
+//	        },
+//	    },
+//	}
+//
+//	if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{
+//	    Objects: objects,
+//	}); err != nil {
+//	    return err
+//	}
+func NewBatchDeleteWithClient(client s3iface.S3API, options ...func(*BatchDelete)) *BatchDelete {
+	svc := &BatchDelete{
+		Client:    client,
+		BatchSize: DefaultBatchSize,
+	}
+
+	for _, opt := range options {
+		opt(svc)
+	}
+
+	return svc
+}
+
+// NewBatchDelete will return a new delete client that can delete a batched amount of
+// objects.
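+//
+// The batch size can be adjusted with a functional option; for example (a
+// sketch, assuming an existing session.Session named sess):
+//
+//	batcher := s3manager.NewBatchDelete(sess, func(d *s3manager.BatchDelete) {
+//	    d.BatchSize = 500 // delete up to 500 keys per DeleteObjects call
+//	})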
+//
+// Example:
+//
+//	batcher := s3manager.NewBatchDelete(sess)
+//
+//	objects := []s3manager.BatchDeleteObject{
+//	    {
+//	        Object: &s3.DeleteObjectInput{
+//	            Key:    aws.String("key"),
+//	            Bucket: aws.String("bucket"),
+//	        },
+//	    },
+//	}
+//
+//	if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{
+//	    Objects: objects,
+//	}); err != nil {
+//	    return err
+//	}
+func NewBatchDelete(c client.ConfigProvider, options ...func(*BatchDelete)) *BatchDelete {
+	client := s3.New(c)
+	return NewBatchDeleteWithClient(client, options...)
+}
+
+// BatchDeleteObject is a wrapper object for calling the batch delete operation.
+type BatchDeleteObject struct {
+	Object *s3.DeleteObjectInput
+	// After will run after each iteration during the batch process. This function will
+	// be executed whether or not the request was successful.
+	After func() error
+}
+
+// DeleteObjectsIterator is an iterator that uses the scanner pattern to iterate
+// through a series of objects to be deleted.
+type DeleteObjectsIterator struct {
+	Objects []BatchDeleteObject
+	index   int
+	inc     bool
+}
+
+// Next will increment the default iterator's index and ensure that there
+// is another object to iterate to.
+func (iter *DeleteObjectsIterator) Next() bool {
+	if iter.inc {
+		iter.index++
+	} else {
+		iter.inc = true
+	}
+	return iter.index < len(iter.Objects)
+}
+
+// Err will return an error. Since this is just used to satisfy the
+// BatchDeleteIterator interface, this will only return nil.
+func (iter *DeleteObjectsIterator) Err() error {
+	return nil
+}
+
+// DeleteObject will return the BatchDeleteObject at the current batched index.
+func (iter *DeleteObjectsIterator) DeleteObject() BatchDeleteObject {
+	object := iter.Objects[iter.index]
+	return object
+}
+
+// Delete will use the iterator to queue up objects that need to be deleted.
+// Once the batch size is met, this will call the deleteBatch function.
+func (d *BatchDelete) Delete(ctx aws.Context, iter BatchDeleteIterator) error {
+	var errs []Error
+	objects := []BatchDeleteObject{}
+	var input *s3.DeleteObjectsInput
+
+	for iter.Next() {
+		o := iter.DeleteObject()
+
+		if input == nil {
+			input = initDeleteObjectsInput(o.Object)
+		}
+
+		parity := hasParity(input, o)
+		if parity {
+			input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{
+				Key:       o.Object.Key,
+				VersionId: o.Object.VersionId,
+			})
+			objects = append(objects, o)
+		}
+
+		if len(input.Delete.Objects) == d.BatchSize || !parity {
+			if err := deleteBatch(ctx, d, input, objects); err != nil {
+				errs = append(errs, err...)
+			}
+
+			objects = objects[:0]
+			input = nil
+
+			if !parity {
+				objects = append(objects, o)
+				input = initDeleteObjectsInput(o.Object)
+				input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{
+					Key:       o.Object.Key,
+					VersionId: o.Object.VersionId,
+				})
+			}
+		}
+	}
+
+	// iter.Next() could return false (above) plus populate iter.Err()
+	if iter.Err() != nil {
+		errs = append(errs, newError(iter.Err(), nil, nil))
+	}
+
+	if input != nil && len(input.Delete.Objects) > 0 {
+		if err := deleteBatch(ctx, d, input, objects); err != nil {
+			errs = append(errs, err...)
+		}
+	}
+
+	if len(errs) > 0 {
+		return NewBatchError("BatchedDeleteIncomplete", "some objects have failed to be deleted.", errs)
+	}
+	return nil
+}
+
+func initDeleteObjectsInput(o *s3.DeleteObjectInput) *s3.DeleteObjectsInput {
+	return &s3.DeleteObjectsInput{
+		Bucket:       o.Bucket,
+		MFA:          o.MFA,
+		RequestPayer: o.RequestPayer,
+		Delete:       &s3.Delete{},
+	}
+}
+
+const (
+	// ErrDeleteBatchFailCode represents an error code which will be returned
+	// only when DeleteObjects.Errors has an error that does not contain a code.
+	ErrDeleteBatchFailCode       = "DeleteBatchError"
+	errDefaultDeleteBatchMessage = "failed to delete"
+)
+
+// deleteBatch will delete a batch of items in the objects parameters.
+func deleteBatch(ctx aws.Context, d *BatchDelete, input *s3.DeleteObjectsInput, objects []BatchDeleteObject) []Error {
+	errs := []Error{}
+
+	if result, err := d.Client.DeleteObjectsWithContext(ctx, input); err != nil {
+		for i := 0; i < len(input.Delete.Objects); i++ {
+			errs = append(errs, newError(err, input.Bucket, input.Delete.Objects[i].Key))
+		}
+	} else if len(result.Errors) > 0 {
+		for i := 0; i < len(result.Errors); i++ {
+			code := ErrDeleteBatchFailCode
+			msg := errDefaultDeleteBatchMessage
+			if result.Errors[i].Message != nil {
+				msg = *result.Errors[i].Message
+			}
+			if result.Errors[i].Code != nil {
+				code = *result.Errors[i].Code
+			}
+
+			errs = append(errs, newError(awserr.New(code, msg, err), input.Bucket, result.Errors[i].Key))
+		}
+	}
+	for _, object := range objects {
+		if object.After == nil {
+			continue
+		}
+		if err := object.After(); err != nil {
+			errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
+		}
+	}
+
+	return errs
+}
+
+// hasParity reports whether the pending DeleteObjectsInput and the next
+// BatchDeleteObject share the same Bucket, MFA, and RequestPayer values, and
+// can therefore be sent in the same DeleteObjects batch.
+func hasParity(o1 *s3.DeleteObjectsInput, o2 BatchDeleteObject) bool {
+	if o1.Bucket != nil && o2.Object.Bucket != nil {
+		if *o1.Bucket != *o2.Object.Bucket {
+			return false
+		}
+	} else if o1.Bucket != o2.Object.Bucket {
+		return false
+	}
+
+	if o1.MFA != nil && o2.Object.MFA != nil {
+		if *o1.MFA != *o2.Object.MFA {
+			return false
+		}
+	} else if o1.MFA != o2.Object.MFA {
+		return false
+	}
+
+	if o1.RequestPayer != nil && o2.Object.RequestPayer != nil {
+		if *o1.RequestPayer != *o2.Object.RequestPayer {
+			return false
+		}
+	} else if o1.RequestPayer != o2.Object.RequestPayer {
+		return false
+	}
+
+	return true
+}
+
+// BatchDownloadIterator is an interface that uses the scanner pattern to iterate
+// through a series of objects to be downloaded.
+type BatchDownloadIterator interface {
+	Next() bool
+	Err() error
+	DownloadObject() BatchDownloadObject
+}
+
+// BatchDownloadObject contains all necessary information to run a batch operation once.
+type BatchDownloadObject struct {
+	Object *s3.GetObjectInput
+	Writer io.WriterAt
+	// After will run after each iteration during the batch process. This function will
+	// be executed whether or not the request was successful.
+	After func() error
+}
+
+// DownloadObjectsIterator implements the BatchDownloadIterator interface and allows for batched
+// download of objects.
+type DownloadObjectsIterator struct {
+	Objects []BatchDownloadObject
+	index   int
+	inc     bool
+}
+
+// Next will increment the default iterator's index and ensure that there
+// is another object to iterate to.
+func (batcher *DownloadObjectsIterator) Next() bool {
+	if batcher.inc {
+		batcher.index++
+	} else {
+		batcher.inc = true
+	}
+	return batcher.index < len(batcher.Objects)
+}
+
+// DownloadObject will return the BatchDownloadObject at the current batched index.
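+//
+// For example, a caller draining the iterator manually rather than through a
+// batch downloader might do the following (a sketch):
+//
+//	for iter.Next() {
+//	    obj := iter.DownloadObject()
+//	    // fetch obj.Object and write the body to obj.Writer
+//	}
+//	if err := iter.Err(); err != nil {
+//	    return err
+//	}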
+func (batcher *DownloadObjectsIterator) DownloadObject() BatchDownloadObject {
+	object := batcher.Objects[batcher.index]
+	return object
+}
+
+// Err will return an error. Since this is just used to satisfy the
+// BatchDownloadIterator interface, this will only return nil.
+func (batcher *DownloadObjectsIterator) Err() error {
+	return nil
+}
+
+// BatchUploadIterator is an interface that uses the scanner pattern to
+// iterate through what needs to be uploaded.
+type BatchUploadIterator interface {
+	Next() bool
+	Err() error
+	UploadObject() BatchUploadObject
+}
+
+// UploadObjectsIterator implements the BatchUploadIterator interface and allows for batched
+// upload of objects.
+type UploadObjectsIterator struct {
+	Objects []BatchUploadObject
+	index   int
+	inc     bool
+}
+
+// Next will increment the default iterator's index and ensure that there
+// is another object to iterate to.
+func (batcher *UploadObjectsIterator) Next() bool {
+	if batcher.inc {
+		batcher.index++
+	} else {
+		batcher.inc = true
+	}
+	return batcher.index < len(batcher.Objects)
+}
+
+// Err will return an error. Since this is just used to satisfy the
+// BatchUploadIterator interface, this will only return nil.
+func (batcher *UploadObjectsIterator) Err() error {
+	return nil
+}
+
+// UploadObject will return the BatchUploadObject at the current batched index.
+func (batcher *UploadObjectsIterator) UploadObject() BatchUploadObject {
+	object := batcher.Objects[batcher.index]
+	return object
+}
+
+// BatchUploadObject contains all necessary information to run a batch operation once.
+type BatchUploadObject struct {
+	Object *UploadInput
+	// After will run after each iteration during the batch process. This function will
+	// be executed whether or not the request was successful.
+	After func() error
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go
new file mode 100644
index 000000000..569474c28
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go
@@ -0,0 +1,159 @@
+package s3manager
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+// GetBucketRegion will attempt to get the region for a bucket using the
+// regionHint to determine which AWS partition to perform the query on.
+//
+// The request will not be signed, and will not use your AWS credentials.
+//
+// A "NotFound" error code will be returned if the bucket does not exist in the
+// AWS partition the regionHint belongs to. If the regionHint parameter is an
+// empty string, GetBucketRegion will fall back to the ConfigProvider's region
+// config. If the regionHint is empty and the ConfigProvider does not have a
+// region value, an error will be returned.
+//
+// For example, to get the region of a bucket which exists in "eu-central-1"
+// you could provide a region hint of "us-west-2".
+//
+//	sess := session.Must(session.NewSession())
+//
+//	bucket := "my-bucket"
+//	region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
+//	if err != nil {
+//	    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
+//	        fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
+//	    }
+//	    return err
+//	}
+//	fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+// By default the request will be made to the Amazon S3 endpoint using Path
+// style addressing.
+//
+//	s3.us-west-2.amazonaws.com/bucketname
+//
+// This is not compatible with Amazon S3's FIPS endpoints. To override this
+// behavior to use Virtual Host style addressing, provide a functional option
+// that will set the Request's Config.S3ForcePathStyle to aws.Bool(false).
+//
+//	region, err := s3manager.GetBucketRegion(ctx, sess, "bucketname", "us-west-2", func(r *request.Request) {
+//	    r.S3ForcePathStyle = aws.Bool(false)
+//	})
+//
+// To configure GetBucketRegion to make a request via the Amazon S3 FIPS
+// endpoints directly when a FIPS region name is not available (e.g.
+// fips-us-gov-west-1), set the Config.Endpoint on the Session or client the
+// utility is called with. The hint region will be ignored if an endpoint URL
+// is configured on the session or client.
+//
+//	sess, err := session.NewSession(&aws.Config{
+//	    Endpoint: aws.String("https://s3-fips.us-west-2.amazonaws.com"),
+//	})
+//
+//	region, err := s3manager.GetBucketRegion(context.Background(), sess, "bucketname", "")
+func GetBucketRegion(ctx aws.Context, c client.ConfigProvider, bucket, regionHint string, opts ...request.Option) (string, error) {
+	var cfg aws.Config
+	if len(regionHint) != 0 {
+		cfg.Region = aws.String(regionHint)
+	}
+	svc := s3.New(c, &cfg)
+	return GetBucketRegionWithClient(ctx, svc, bucket, opts...)
+}
+
+const bucketRegionHeader = "X-Amz-Bucket-Region"
+
+// GetBucketRegionWithClient is the same as GetBucketRegion with the exception
+// that it takes an S3 service client instead of a Session. The regionHint is
+// derived from the region the S3 service client was created in.
+//
+// By default the request will be made to the Amazon S3 endpoint using Path
+// style addressing.
+//
+//	s3.us-west-2.amazonaws.com/bucketname
+//
+// This is not compatible with Amazon S3's FIPS endpoints. To override this
+// behavior to use Virtual Host style addressing, provide a functional option
+// that will set the Request's Config.S3ForcePathStyle to aws.Bool(false).
+//
+//	region, err := s3manager.GetBucketRegionWithClient(ctx, client, "bucketname", func(r *request.Request) {
+//	    r.S3ForcePathStyle = aws.Bool(false)
+//	})
+//
+// To configure GetBucketRegionWithClient to make a request via the Amazon S3
+// FIPS endpoints directly when a FIPS region name is not available (e.g.
+// fips-us-gov-west-1), set the Config.Endpoint on the Session or client the
+// utility is called with. The hint region will be ignored if an endpoint URL
+// is configured on the session or client.
+//
+//	region, err := s3manager.GetBucketRegionWithClient(context.Background(),
+//	    s3.New(sess, &aws.Config{
+//	        Endpoint: aws.String("https://s3-fips.us-west-2.amazonaws.com"),
+//	    }),
+//	    "bucketname")
+//
+// See GetBucketRegion for more information.
+func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string, opts ...request.Option) (string, error) {
+	req, _ := svc.HeadBucketRequest(&s3.HeadBucketInput{
+		Bucket: aws.String(bucket),
+	})
+	req.Config.S3ForcePathStyle = aws.Bool(true)
+
+	req.Config.Credentials = credentials.AnonymousCredentials
+	req.SetContext(ctx)
+
+	// Disable HTTP redirects to prevent an invalid 301 from eating the response,
+	// because Go's HTTP client will fail and drop the response if a 301 is
+	// received without a location header. S3 will return a 301 without the
+	// location header for HeadObject API calls.
+	req.DisableFollowRedirects = true
+
+	var bucketRegion string
+	req.Handlers.Send.PushBack(func(r *request.Request) {
+		bucketRegion = r.HTTPResponse.Header.Get(bucketRegionHeader)
+		if len(bucketRegion) == 0 {
+			return
+		}
+		r.HTTPResponse.StatusCode = 200
+		r.HTTPResponse.Status = "OK"
+		r.Error = nil
+	})
+	// Replace the endpoint validation handler to not require a region if an
+	// endpoint URL was specified. Since these requests are not authenticated,
+	// requiring a region is not needed when an endpoint URL is provided.
+	req.Handlers.Validate.Swap(
+		corehandlers.ValidateEndpointHandler.Name,
+		request.NamedHandler{
+			Name: "validateEndpointWithoutRegion",
+			Fn:   validateEndpointWithoutRegion,
+		},
+	)
+
+	req.ApplyOptions(opts...)
+
+	if err := req.Send(); err != nil {
+		return "", err
+	}
+
+	bucketRegion = s3.NormalizeBucketLocation(bucketRegion)
+
+	return bucketRegion, nil
+}
+
+func validateEndpointWithoutRegion(r *request.Request) {
+	// Check if the caller provided an explicit URL instead of one derived by
+	// the SDK's endpoint resolver. For GetBucketRegion, with an explicit
+	// endpoint URL, a region is not needed. If no endpoint URL is provided,
+	// fall back to the SDK's standard endpoint validation handler.
+	if len(aws.StringValue(r.Config.Endpoint)) == 0 {
+		corehandlers.ValidateEndpointHandler.Fn(r)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go
new file mode 100644
index 000000000..f1d9e85c7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go
@@ -0,0 +1,81 @@
+package s3manager
+
+import (
+	"io"
+
+	"github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// BufferedReadSeeker is a buffered io.ReadSeeker.
+type BufferedReadSeeker struct {
+	r                 io.ReadSeeker
+	buffer            []byte
+	readIdx, writeIdx int
+}
+
+// NewBufferedReadSeeker returns a new BufferedReadSeeker.
+// If len(b) == 0 then the buffer will be initialized to 64 KiB.
+func NewBufferedReadSeeker(r io.ReadSeeker, b []byte) *BufferedReadSeeker {
+	if len(b) == 0 {
+		b = make([]byte, 64*1024)
+	}
+	return &BufferedReadSeeker{r: r, buffer: b}
+}
+
+func (b *BufferedReadSeeker) reset(r io.ReadSeeker) {
+	b.r = r
+	b.readIdx, b.writeIdx = 0, 0
+}
+
+// Read will read up to len(p) bytes into p and will return
+// the number of bytes read and any error that occurred.
+// If len(p) > the buffer size then a single read request
+// will be issued to the underlying io.ReadSeeker for len(p) bytes.
+// A Read request will at most perform a single Read to the underlying
+// io.ReadSeeker, and may return < len(p) if serviced from the buffer.
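+//
+// For example (a sketch; f is an assumed open *os.File):
+//
+//	brs := s3manager.NewBufferedReadSeeker(f, make([]byte, 32*1024))
+//	p := make([]byte, 512)
+//	n, err := brs.Read(p) // fills the 32 KiB buffer once, then copies 512 bytes out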
+func (b *BufferedReadSeeker) Read(p []byte) (n int, err error) {
+	if len(p) == 0 {
+		return n, err
+	}
+
+	if b.readIdx == b.writeIdx {
+		if len(p) >= len(b.buffer) {
+			n, err = b.r.Read(p)
+			return n, err
+		}
+		b.readIdx, b.writeIdx = 0, 0
+
+		n, err = b.r.Read(b.buffer)
+		if n == 0 {
+			return n, err
+		}
+
+		b.writeIdx += n
+	}
+
+	n = copy(p, b.buffer[b.readIdx:b.writeIdx])
+	b.readIdx += n
+
+	return n, err
+}
+
+// Seek will position the underlying io.ReadSeeker at the given offset
+// and will clear the buffer.
+func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) {
+	n, err := b.r.Seek(offset, whence)
+
+	b.reset(b.r)
+
+	return n, err
+}
+
+// ReadAt will read up to len(p) bytes at the given file offset.
+// This will result in the buffer being cleared.
+func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) {
+	_, err := b.Seek(off, sdkio.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+
+	return b.Read(p)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go
new file mode 100644
index 000000000..e013be27e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go
@@ -0,0 +1,8 @@
+//go:build !windows
+// +build !windows
+
+package s3manager
+
+func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go
new file mode 100644
index 000000000..687082c30
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go
@@ -0,0 +1,5 @@
+package s3manager
+
+func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
+	return NewBufferedReadSeekerWriteToPool(1024 * 1024)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go
new file mode 100644
index 000000000..5583c30a4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go
@@ -0,0 +1,8 @@
+//go:build !windows
+// +build !windows
+
+package s3manager
+
+func defaultDownloadBufferProvider() WriterReadFromProvider {
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go
new file mode 100644
index 000000000..7e9d9579f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go
@@ -0,0 +1,5 @@
+package s3manager
+
+func defaultDownloadBufferProvider() WriterReadFromProvider {
+	return NewPooledBufferedWriterReadFromProvider(1024 * 1024)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go
new file mode 100644
index 000000000..229c0d63b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go
@@ -0,0 +1,3 @@
+// Package s3manager provides utilities to upload and download objects from
+// S3 concurrently. Helpful when working with large objects.
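+//
+// For example, a concurrent download to a local file (a sketch; error
+// handling elided, and the session and os imports are assumed):
+//
+//	sess := session.Must(session.NewSession())
+//	f, _ := os.Create("local-file")
+//	defer f.Close()
+//	n, _ := s3manager.NewDownloader(sess).Download(f, &s3.GetObjectInput{
+//	    Bucket: aws.String("bucket"),
+//	    Key:    aws.String("key"),
+//	})
+//	_ = n // size of the downloaded object in bytes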
+package s3manager diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go new file mode 100644 index 000000000..37eb7dd52 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go @@ -0,0 +1,608 @@ +package s3manager + +import ( + "fmt" + "io" + "net/http" + "strconv" + "strings" + "sync" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" +) + +// DefaultDownloadPartSize is the default range of bytes to get at a time when +// using Download(). +const DefaultDownloadPartSize = 1024 * 1024 * 5 + +// DefaultDownloadConcurrency is the default number of goroutines to spin up +// when using Download(). +const DefaultDownloadConcurrency = 5 + +type errReadingBody struct { + err error +} + +func (e *errReadingBody) Error() string { + return fmt.Sprintf("failed to read part body: %v", e.err) +} + +func (e *errReadingBody) Unwrap() error { + return e.err +} + +// The Downloader structure that calls Download(). It is safe to call Download() +// on this structure for multiple objects and across concurrent goroutines. +// Mutating the Downloader's properties is not safe to be done concurrently. +type Downloader struct { + // The size (in bytes) to request from S3 for each part. + // The minimum allowed part size is 5MB, and if this value is set to zero, + // the DefaultDownloadPartSize value will be used. + // + // PartSize is ignored if the Range input parameter is provided. + PartSize int64 + + // The number of goroutines to spin up in parallel when sending parts. + // If this is set to zero, the DefaultDownloadConcurrency value will be used. + // + // Concurrency of 1 will download the parts sequentially. + // + // Concurrency is ignored if the Range input parameter is provided. + Concurrency int + + // An S3 client to use when performing downloads. + S3 s3iface.S3API + + // List of request options that will be passed down to individual API + // operation requests made by the downloader. + RequestOptions []request.Option + + // Defines the buffer strategy used when downloading a part. + // + // If a WriterReadFromProvider is given the Download manager + // will pass the io.WriterAt of the Download request to the provider + // and will use the returned WriterReadFrom from the provider as the + // destination writer when copying from http response body. + BufferProvider WriterReadFromProvider +} + +// WithDownloaderRequestOptions appends to the Downloader's API request options. +func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader) { + return func(d *Downloader) { + d.RequestOptions = append(d.RequestOptions, opts...) + } +} + +// NewDownloader creates a new Downloader instance to downloads objects from +// S3 in concurrent chunks. Pass in additional functional options to customize +// the downloader behavior. Requires a client.ConfigProvider in order to create +// a S3 service client. The session.Session satisfies the client.ConfigProvider +// interface. 
+// +// Example: +// +// // The session the S3 Downloader will use +// sess := session.Must(session.NewSession()) +// +// // Create a downloader with the session and default options +// downloader := s3manager.NewDownloader(sess) +// +// // Create a downloader with the session and custom options +// downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) { +// d.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader { + return newDownloader(s3.New(c), options...) +} + +func newDownloader(client s3iface.S3API, options ...func(*Downloader)) *Downloader { + d := &Downloader{ + S3: client, + PartSize: DefaultDownloadPartSize, + Concurrency: DefaultDownloadConcurrency, + BufferProvider: defaultDownloadBufferProvider(), + } + for _, option := range options { + option(d) + } + + return d +} + +// NewDownloaderWithClient creates a new Downloader instance to downloads +// objects from S3 in concurrent chunks. Pass in additional functional +// options to customize the downloader behavior. Requires a S3 service client +// to make S3 API calls. +// +// Example: +// +// // The session the S3 Downloader will use +// sess := session.Must(session.NewSession()) +// +// // The S3 client the S3 Downloader will use +// s3Svc := s3.New(sess) +// +// // Create a downloader with the s3 client and default options +// downloader := s3manager.NewDownloaderWithClient(s3Svc) +// +// // Create a downloader with the s3 client and custom options +// downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) { +// d.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader { + return newDownloader(svc, options...) +} + +type maxRetrier interface { + MaxRetries() int +} + +// Download downloads an object in S3 and writes the payload into w using +// concurrent GET requests. The n int64 returned is the size of the object downloaded +// in bytes. +// +// Additional functional options can be provided to configure the individual +// download. These options are copies of the Downloader instance Download is called from. +// Modifying the options will not impact the original Downloader instance. +// +// It is safe to call this method concurrently across goroutines. +// +// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent +// downloads, or in memory []byte wrapper using aws.WriteAtBuffer. +// +// Specifying a Downloader.Concurrency of 1 will cause the Downloader to +// download the parts from S3 sequentially. +// +// If the GetObjectInput's Range value is provided that will cause the downloader +// to perform a single GetObjectInput request for that object's range. This will +// caused the part size, and concurrency configurations to be ignored. +func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) { + return d.DownloadWithContext(aws.BackgroundContext(), w, input, options...) +} + +// DownloadWithContext downloads an object in S3 and writes the payload into w +// using concurrent GET requests. The n int64 returned is the size of the object downloaded +// in bytes. +// +// DownloadWithContext is the same as Download with the additional support for +// Context input parameters. The Context must not be nil. A nil Context will +// cause a panic. Use the Context to add deadlining, timeouts, etc. 
The
+// DownloadWithContext may create sub-contexts for individual underlying
+// requests.
+//
+// Additional functional options can be provided to configure the individual
+// download. These options are copies of the Downloader instance Download is
+// called from. Modifying the options will not impact the original Downloader
+// instance. Use the WithDownloaderRequestOptions helper function to pass in request
+// options that will be applied to all API operations made with this downloader.
+//
+// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
+// downloads, or an in-memory []byte wrapper using aws.WriteAtBuffer.
+//
+// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
+// download the parts from S3 sequentially.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// If the GetObjectInput's Range value is provided, the downloader will perform
+// a single GetObject request for that object's range. This will cause the part
+// size and concurrency configurations to be ignored.
+func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
+	if err := validateSupportedARNType(aws.StringValue(input.Bucket)); err != nil {
+		return 0, err
+	}
+
+	impl := downloader{w: w, in: input, cfg: d, ctx: ctx}
+
+	for _, option := range options {
+		option(&impl.cfg)
+	}
+	impl.cfg.RequestOptions = append(impl.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
+
+	if s, ok := d.S3.(maxRetrier); ok {
+		impl.partBodyMaxRetries = s.MaxRetries()
+	}
+
+	impl.totalBytes = -1
+	if impl.cfg.Concurrency == 0 {
+		impl.cfg.Concurrency = DefaultDownloadConcurrency
+	}
+
+	if impl.cfg.PartSize == 0 {
+		impl.cfg.PartSize = DefaultDownloadPartSize
+	}
+
+	return impl.download()
+}
+
+// DownloadWithIterator downloads a batch of objects from S3 and writes them
+// to the io.WriterAt specified in the iterator.
+//
+// Example:
+//
+//	svc := s3manager.NewDownloader(session)
+//
+//	fooFile, err := os.Open("/tmp/foo.file")
+//	if err != nil {
+//		return err
+//	}
+//
+//	barFile, err := os.Open("/tmp/bar.file")
+//	if err != nil {
+//		return err
+//	}
+//
+//	objects := []s3manager.BatchDownloadObject {
+//		{
+//			Object: &s3.GetObjectInput {
+//				Bucket: aws.String("bucket"),
+//				Key:    aws.String("foo"),
+//			},
+//			Writer: fooFile,
+//		},
+//		{
+//			Object: &s3.GetObjectInput {
+//				Bucket: aws.String("bucket"),
+//				Key:    aws.String("bar"),
+//			},
+//			Writer: barFile,
+//		},
+//	}
+//
+//	iter := &s3manager.DownloadObjectsIterator{Objects: objects}
+//	if err := svc.DownloadWithIterator(aws.BackgroundContext(), iter); err != nil {
+//		return err
+//	}
+func (d Downloader) DownloadWithIterator(ctx aws.Context, iter BatchDownloadIterator, opts ...func(*Downloader)) error {
+	var errs []Error
+	for iter.Next() {
+		object := iter.DownloadObject()
+		if _, err := d.DownloadWithContext(ctx, object.Writer, object.Object, opts...); err != nil {
+			errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
+		}
+
+		if object.After == nil {
+			continue
+		}
+
+		if err := object.After(); err != nil {
+			errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
+		}
+	}
+
+	if len(errs) > 0 {
+		return NewBatchError("BatchedDownloadIncomplete", "some objects have failed to download.", errs)
+	}
+	return nil
+}
+
+// downloader is the implementation structure used internally by Downloader.
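+// A fresh downloader value is created for each Download call; worker
+// goroutines receive byte-range chunks over a channel and share position,
+// byte-count, and error state behind the mutex m.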
+type downloader struct {
+	ctx aws.Context
+	cfg Downloader
+
+	in *s3.GetObjectInput
+	w  io.WriterAt
+
+	wg sync.WaitGroup
+	m  sync.Mutex
+
+	pos        int64
+	totalBytes int64
+	written    int64
+	err        error
+
+	partBodyMaxRetries int
+}
+
+// download performs the implementation of the object download across ranged
+// GETs.
+func (d *downloader) download() (n int64, err error) {
+	// If a range is specified, fall back to a single download of that range.
+	// This enables ranged gets with the downloader, at the cost of no
+	// multipart downloads.
+	if rng := aws.StringValue(d.in.Range); len(rng) > 0 {
+		d.downloadRange(rng)
+		return d.written, d.err
+	}
+
+	// Spin off first worker to check additional header information
+	d.getChunk()
+
+	if total := d.getTotalBytes(); total >= 0 {
+		// Spin up workers
+		ch := make(chan dlchunk, d.cfg.Concurrency)
+
+		for i := 0; i < d.cfg.Concurrency; i++ {
+			d.wg.Add(1)
+			go d.downloadPart(ch)
+		}
+
+		// Assign work
+		for d.getErr() == nil {
+			if d.pos >= total {
+				break // We're finished queuing chunks
+			}
+
+			// Queue the next range of bytes to read.
+			ch <- dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
+			d.pos += d.cfg.PartSize
+		}
+
+		// Wait for completion
+		close(ch)
+		d.wg.Wait()
+	} else {
+		// Checking if we read anything new
+		for d.err == nil {
+			d.getChunk()
+		}
+
+		// We expect a 416 error letting us know we are done downloading the
+		// total bytes. Since we do not know the content's length, this will
+		// keep grabbing chunks of data until the range of bytes specified in
+		// the request is out of range of the content. Once this happens, a
+		// 416 should occur.
+		e, ok := d.err.(awserr.RequestFailure)
+		if ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable {
+			d.err = nil
+		}
+	}
+
+	// Return error
+	return d.written, d.err
+}
+
+// downloadPart is an individual goroutine worker reading from the ch channel
+// and performing a GetObject request on the data with a given byte range.
+//
+// If this is the first worker, this operation also resolves the total number
+// of bytes to be read so that the worker manager knows when it is finished.
+func (d *downloader) downloadPart(ch chan dlchunk) {
+	defer d.wg.Done()
+	for {
+		chunk, ok := <-ch
+		if !ok {
+			break
+		}
+		if d.getErr() != nil {
+			// Drain the channel if there is an error, to prevent deadlocking
+			// of download producer.
+			continue
+		}
+
+		if err := d.downloadChunk(chunk); err != nil {
+			d.setErr(err)
+		}
+	}
+}
+
+// getChunk grabs a chunk of data from the body.
+// Not thread safe. Should only be used when grabbing data on a single thread.
+func (d *downloader) getChunk() {
+	if d.getErr() != nil {
+		return
+	}
+
+	chunk := dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
+	d.pos += d.cfg.PartSize
+
+	if err := d.downloadChunk(chunk); err != nil {
+		d.setErr(err)
+	}
+}
+
+// downloadRange downloads an Object given the passed in Byte-Range value.
+// The chunk used to download the range will be configured for that range.
+func (d *downloader) downloadRange(rng string) {
+	if d.getErr() != nil {
+		return
+	}
+
+	chunk := dlchunk{w: d.w, start: d.pos}
+	// Ranges specified will short circuit the multipart download
+	chunk.withRange = rng
+
+	if err := d.downloadChunk(chunk); err != nil {
+		d.setErr(err)
+	}
+
+	// Update the position based on the amount of data received.
+ d.pos = d.written +} + +// downloadChunk downloads the chunk from s3 +func (d *downloader) downloadChunk(chunk dlchunk) error { + in := &s3.GetObjectInput{} + awsutil.Copy(in, d.in) + + // Get the next byte range of data + in.Range = aws.String(chunk.ByteRange()) + + var n int64 + var err error + for retry := 0; retry <= d.partBodyMaxRetries; retry++ { + n, err = d.tryDownloadChunk(in, &chunk) + if err == nil { + break + } + // Check if the returned error is an errReadingBody. + // If err is errReadingBody this indicates that an error + // occurred while copying the http response body. + // If this occurs we unwrap the err to set the underlying error + // and attempt any remaining retries. + if bodyErr, ok := err.(*errReadingBody); ok { + err = bodyErr.Unwrap() + } else { + return err + } + + chunk.cur = 0 + logMessage(d.cfg.S3, aws.LogDebugWithRequestRetries, + fmt.Sprintf("DEBUG: object part body download interrupted %s, err, %v, retrying attempt %d", + aws.StringValue(in.Key), err, retry)) + } + + d.incrWritten(n) + + return err +} + +func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64, error) { + cleanup := func() {} + if d.cfg.BufferProvider != nil { + w, cleanup = d.cfg.BufferProvider.GetReadFrom(w) + } + defer cleanup() + + resp, err := d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...) + if err != nil { + return 0, err + } + d.setTotalBytes(resp) // Set total if not yet set. + + var src io.Reader = resp.Body + if d.cfg.BufferProvider != nil { + src = &suppressWriterAt{suppressed: src} + } + n, err := io.Copy(w, src) + resp.Body.Close() + if err != nil { + return n, &errReadingBody{err: err} + } + + return n, nil +} + +func logMessage(svc s3iface.S3API, level aws.LogLevelType, msg string) { + s, ok := svc.(*s3.S3) + if !ok { + return + } + + if s.Config.Logger == nil { + return + } + + if s.Config.LogLevel.Matches(level) { + s.Config.Logger.Log(msg) + } +} + +// getTotalBytes is a thread-safe getter for retrieving the total byte status. +func (d *downloader) getTotalBytes() int64 { + d.m.Lock() + defer d.m.Unlock() + + return d.totalBytes +} + +// setTotalBytes is a thread-safe setter for setting the total byte status. +// Will extract the object's total bytes from the Content-Range if the file +// will be chunked, or Content-Length. Content-Length is used when the response +// does not include a Content-Range. Meaning the object was not chunked. This +// occurs when the full file fits within the PartSize directive. +func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) { + d.m.Lock() + defer d.m.Unlock() + + if d.totalBytes >= 0 { + return + } + + if resp.ContentRange == nil { + // ContentRange is nil when the full file contents is provided, and + // is not chunked. Use ContentLength instead. 
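+		// For example, a response with no Content-Range but a Content-Length
+		// of 12582912 sets totalBytes to 12582912; a Content-Range of
+		// "bytes 0-5242879/12582912" yields the same total, while
+		// "bytes 0-5242879/*" leaves it unknown (-1).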
+ if resp.ContentLength != nil { + d.totalBytes = *resp.ContentLength + return + } + } else { + parts := strings.Split(*resp.ContentRange, "/") + + total := int64(-1) + var err error + // Checking for whether or not a numbered total exists + // If one does not exist, we will assume the total to be -1, undefined, + // and sequentially download each chunk until hitting a 416 error + totalStr := parts[len(parts)-1] + if totalStr != "*" { + total, err = strconv.ParseInt(totalStr, 10, 64) + if err != nil { + d.err = err + return + } + } + + d.totalBytes = total + } +} + +func (d *downloader) incrWritten(n int64) { + d.m.Lock() + defer d.m.Unlock() + + d.written += n +} + +// getErr is a thread-safe getter for the error object +func (d *downloader) getErr() error { + d.m.Lock() + defer d.m.Unlock() + + return d.err +} + +// setErr is a thread-safe setter for the error object +func (d *downloader) setErr(e error) { + d.m.Lock() + defer d.m.Unlock() + + d.err = e +} + +// dlchunk represents a single chunk of data to write by the worker routine. +// This structure also implements an io.SectionReader style interface for +// io.WriterAt, effectively making it an io.SectionWriter (which does not +// exist). +type dlchunk struct { + w io.WriterAt + start int64 + size int64 + cur int64 + + // specifies the byte range the chunk should be downloaded with. + withRange string +} + +// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start +// position to its end (or EOF). +// +// If a range is specified on the dlchunk the size will be ignored when writing. +// as the total size may not of be known ahead of time. +func (c *dlchunk) Write(p []byte) (n int, err error) { + if c.cur >= c.size && len(c.withRange) == 0 { + return 0, io.EOF + } + + n, err = c.w.WriteAt(p, c.start+c.cur) + c.cur += int64(n) + + return +} + +// ByteRange returns a HTTP Byte-Range header value that should be used by the +// client to request the chunk's range. +func (c *dlchunk) ByteRange() string { + if len(c.withRange) != 0 { + return c.withRange + } + + return fmt.Sprintf("bytes=%d-%d", c.start, c.start+c.size-1) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/pool.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/pool.go new file mode 100644 index 000000000..f6f27fc48 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/pool.go @@ -0,0 +1,252 @@ +package s3manager + +import ( + "fmt" + "sync" + + "github.com/aws/aws-sdk-go/aws" +) + +type byteSlicePool interface { + Get(aws.Context) (*[]byte, error) + Put(*[]byte) + ModifyCapacity(int) + SliceSize() int64 + Close() +} + +type maxSlicePool struct { + // allocator is defined as a function pointer to allow + // for test cases to instrument custom tracers when allocations + // occur. 
+ allocator sliceAllocator + + slices chan *[]byte + allocations chan struct{} + capacityChange chan struct{} + + max int + sliceSize int64 + + mtx sync.RWMutex +} + +func newMaxSlicePool(sliceSize int64) *maxSlicePool { + p := &maxSlicePool{sliceSize: sliceSize} + p.allocator = p.newSlice + + return p +} + +var errZeroCapacity = fmt.Errorf("get called on zero capacity pool") + +func (p *maxSlicePool) Get(ctx aws.Context) (*[]byte, error) { + // check if context is canceled before attempting to get a slice + // this ensures priority is given to the cancel case first + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + p.mtx.RLock() + + for { + select { + case bs, ok := <-p.slices: + p.mtx.RUnlock() + if !ok { + // attempt to get on a zero capacity pool + return nil, errZeroCapacity + } + return bs, nil + case <-ctx.Done(): + p.mtx.RUnlock() + return nil, ctx.Err() + default: + // pass + } + + select { + case _, ok := <-p.allocations: + p.mtx.RUnlock() + if !ok { + // attempt to get on a zero capacity pool + return nil, errZeroCapacity + } + return p.allocator(), nil + case <-ctx.Done(): + p.mtx.RUnlock() + return nil, ctx.Err() + default: + // In the event that there are no slices or allocations available + // This prevents some deadlock situations that can occur around sync.RWMutex + // When a lock request occurs on ModifyCapacity, no new readers are allowed to acquire a read lock. + // By releasing the read lock here and waiting for a notification, we prevent a deadlock situation where + // Get could hold the read lock indefinitely waiting for capacity, ModifyCapacity is waiting for a write lock, + // and a Put is blocked trying to get a read-lock which is blocked by ModifyCapacity. + + // Short-circuit if the pool capacity is zero. + if p.max == 0 { + p.mtx.RUnlock() + return nil, errZeroCapacity + } + + // Since we will be releasing the read-lock we need to take the reference to the channel. + // Since channels are references we will still get notified if slices are added, or if + // the channel is closed due to a capacity modification. This specifically avoids a data race condition + // where ModifyCapacity both closes a channel and initializes a new one while we don't have a read-lock. + c := p.capacityChange + + p.mtx.RUnlock() + + select { + case _ = <-c: + p.mtx.RLock() + case <-ctx.Done(): + return nil, ctx.Err() + } + } + } +} + +func (p *maxSlicePool) Put(bs *[]byte) { + p.mtx.RLock() + defer p.mtx.RUnlock() + + if p.max == 0 { + return + } + + select { + case p.slices <- bs: + p.notifyCapacity() + default: + // If the new channel when attempting to add the slice then we drop the slice. + // The logic here is to prevent a deadlock situation if channel is already at max capacity. + // Allows us to reap allocations that are returned and are no longer needed. 
+ } +} + +func (p *maxSlicePool) ModifyCapacity(delta int) { + if delta == 0 { + return + } + + p.mtx.Lock() + defer p.mtx.Unlock() + + p.max += delta + + if p.max == 0 { + p.empty() + return + } + + if p.capacityChange != nil { + close(p.capacityChange) + } + p.capacityChange = make(chan struct{}, p.max) + + origAllocations := p.allocations + p.allocations = make(chan struct{}, p.max) + + newAllocs := len(origAllocations) + delta + for i := 0; i < newAllocs; i++ { + p.allocations <- struct{}{} + } + + if origAllocations != nil { + close(origAllocations) + } + + origSlices := p.slices + p.slices = make(chan *[]byte, p.max) + if origSlices == nil { + return + } + + close(origSlices) + for bs := range origSlices { + select { + case p.slices <- bs: + default: + // If the new channel blocks while adding slices from the old channel + // then we drop the slice. The logic here is to prevent a deadlock situation + // if the new channel has a smaller capacity then the old. + } + } +} + +func (p *maxSlicePool) notifyCapacity() { + select { + case p.capacityChange <- struct{}{}: + default: + // This *shouldn't* happen as the channel is both buffered to the max pool capacity size and is resized + // on capacity modifications. This is just a safety to ensure that a blocking situation can't occur. + } +} + +func (p *maxSlicePool) SliceSize() int64 { + return p.sliceSize +} + +func (p *maxSlicePool) Close() { + p.mtx.Lock() + defer p.mtx.Unlock() + p.empty() +} + +func (p *maxSlicePool) empty() { + p.max = 0 + + if p.capacityChange != nil { + close(p.capacityChange) + p.capacityChange = nil + } + + if p.allocations != nil { + close(p.allocations) + for range p.allocations { + // drain channel + } + p.allocations = nil + } + + if p.slices != nil { + close(p.slices) + for range p.slices { + // drain channel + } + p.slices = nil + } +} + +func (p *maxSlicePool) newSlice() *[]byte { + bs := make([]byte, p.sliceSize) + return &bs +} + +type returnCapacityPoolCloser struct { + byteSlicePool + returnCapacity int +} + +func (n *returnCapacityPoolCloser) ModifyCapacity(delta int) { + if delta > 0 { + n.returnCapacity = -1 * delta + } + n.byteSlicePool.ModifyCapacity(delta) +} + +func (n *returnCapacityPoolCloser) Close() { + if n.returnCapacity < 0 { + n.byteSlicePool.ModifyCapacity(n.returnCapacity) + } +} + +type sliceAllocator func() *[]byte + +var newByteSlicePool = func(sliceSize int64) byteSlicePool { + return newMaxSlicePool(sliceSize) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go new file mode 100644 index 000000000..f62e1a45e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go @@ -0,0 +1,65 @@ +package s3manager + +import ( + "io" + "sync" +) + +// ReadSeekerWriteTo defines an interface implementing io.WriteTo and io.ReadSeeker +type ReadSeekerWriteTo interface { + io.ReadSeeker + io.WriterTo +} + +// BufferedReadSeekerWriteTo wraps a BufferedReadSeeker with an io.WriteAt +// implementation. +type BufferedReadSeekerWriteTo struct { + *BufferedReadSeeker +} + +// WriteTo writes to the given io.Writer from BufferedReadSeeker until there's no more data to write or +// an error occurs. Returns the number of bytes written and any error encountered during the write. 
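+//
+// Example (sketch), where w is a hypothetical *BufferedReadSeekerWriteTo:
+//
+//	var dst bytes.Buffer
+//	n, err := w.WriteTo(&dst) // copies until EOF via io.Copy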
+func (b *BufferedReadSeekerWriteTo) WriteTo(writer io.Writer) (int64, error) {
+	return io.Copy(writer, b.BufferedReadSeeker)
+}
+
+// ReadSeekerWriteToProvider provides an implementation of io.WriteTo for an io.ReadSeeker.
+type ReadSeekerWriteToProvider interface {
+	GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func())
+}
+
+// BufferedReadSeekerWriteToPool uses a sync.Pool to create and reuse
+// []byte slices for buffering parts in memory.
+type BufferedReadSeekerWriteToPool struct {
+	pool sync.Pool
+}
+
+// NewBufferedReadSeekerWriteToPool will return a new BufferedReadSeekerWriteToPool that will create
+// a pool of reusable buffers. If size is less than 64 KiB, the buffer
+// will default to 64 KiB. Reason: io.Copy from writers or readers that don't support io.WriteTo or io.ReadFrom
+// respectively will default to copying 32 KiB.
+func NewBufferedReadSeekerWriteToPool(size int) *BufferedReadSeekerWriteToPool {
+	if size < 65536 {
+		size = 65536
+	}
+
+	return &BufferedReadSeekerWriteToPool{
+		pool: sync.Pool{New: func() interface{} {
+			return make([]byte, size)
+		}},
+	}
+}
+
+// GetWriteTo will wrap the provided io.ReadSeeker with a BufferedReadSeekerWriteTo.
+// The provided cleanup must be called after operations have been completed on the
+// returned ReadSeekerWriteTo in order to signal the return of resources to the pool.
+func (p *BufferedReadSeekerWriteToPool) GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) {
+	buffer := p.pool.Get().([]byte)
+
+	r = &BufferedReadSeekerWriteTo{BufferedReadSeeker: NewBufferedReadSeeker(seeker, buffer)}
+	cleanup = func() {
+		p.pool.Put(buffer)
+	}
+
+	return r, cleanup
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go
new file mode 100644
index 000000000..11c5697f3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go
@@ -0,0 +1,46 @@
+// Package s3manageriface provides an interface for the s3manager package
+package s3manageriface
+
+import (
+	"io"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/s3/s3manager"
+)
+
+var _ DownloaderAPI = (*s3manager.Downloader)(nil)
+
+// DownloaderAPI is the interface type for s3manager.Downloader.
+type DownloaderAPI interface {
+	Download(io.WriterAt, *s3.GetObjectInput, ...func(*s3manager.Downloader)) (int64, error)
+	DownloadWithContext(aws.Context, io.WriterAt, *s3.GetObjectInput, ...func(*s3manager.Downloader)) (int64, error)
+}
+
+// DownloadWithIterator is the interface type for the contained method of the same name.
+type DownloadWithIterator interface {
+	DownloadWithIterator(aws.Context, s3manager.BatchDownloadIterator, ...func(*s3manager.Downloader)) error
+}
+
+var _ UploaderAPI = (*s3manager.Uploader)(nil)
+var _ UploadWithIterator = (*s3manager.Uploader)(nil)
+
+// UploaderAPI is the interface type for s3manager.Uploader.
+type UploaderAPI interface {
+	Upload(*s3manager.UploadInput, ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error)
+	UploadWithContext(aws.Context, *s3manager.UploadInput, ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error)
+}
+
+// UploadWithIterator is the interface for uploading objects to S3 using the S3
+// upload manager.
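+//
+// Example (sketch): depending on this interface instead of the concrete
+// *s3manager.Uploader lets tests substitute a stub. The batchUploader type
+// below is hypothetical:
+//
+//	type batchUploader struct {
+//		u s3manageriface.UploadWithIterator
+//	}
+//
+//	func (b *batchUploader) run(ctx aws.Context, iter s3manager.BatchUploadIterator) error {
+//		return b.u.UploadWithIterator(ctx, iter)
+//	}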
+type UploadWithIterator interface { + UploadWithIterator(aws.Context, s3manager.BatchUploadIterator, ...func(*s3manager.Uploader)) error +} + +var _ BatchDelete = (*s3manager.BatchDelete)(nil) + +// BatchDelete is the interface type for batch deleting objects from S3 using +// the S3 manager. (separated for user to compose). +type BatchDelete interface { + Delete(aws.Context, s3manager.BatchDeleteIterator) error +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go new file mode 100644 index 000000000..da243a302 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go @@ -0,0 +1,797 @@ +package s3manager + +import ( + "bytes" + "fmt" + "io" + "sort" + "sync" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" +) + +// MaxUploadParts is the maximum allowed number of parts in a multi-part upload +// on Amazon S3. +const MaxUploadParts = 10000 + +// MinUploadPartSize is the minimum allowed part size when uploading a part to +// Amazon S3. +const MinUploadPartSize int64 = 1024 * 1024 * 5 + +// DefaultUploadPartSize is the default part size to buffer chunks of a +// payload into. +const DefaultUploadPartSize = MinUploadPartSize + +// DefaultUploadConcurrency is the default number of goroutines to spin up when +// using Upload(). +const DefaultUploadConcurrency = 5 + +// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned +// will satisfy this interface when a multi part upload failed to upload all +// chucks to S3. In the case of a failure the UploadID is needed to operate on +// the chunks, if any, which were uploaded. +// +// Example: +// +// u := s3manager.NewUploader(opts) +// output, err := u.upload(input) +// if err != nil { +// if multierr, ok := err.(s3manager.MultiUploadFailure); ok { +// // Process error and its associated uploadID +// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID()) +// } else { +// // Process error generically +// fmt.Println("Error:", err.Error()) +// } +// } +type MultiUploadFailure interface { + awserr.Error + + // Returns the upload id for the S3 multipart upload that failed. + UploadID() string +} + +// So that the Error interface type can be included as an anonymous field +// in the multiUploadError struct and not conflict with the error.Error() method. +type awsError awserr.Error + +// A multiUploadError wraps the upload ID of a failed s3 multipart upload. +// Composed of BaseError for code, message, and original error +// +// Should be used for an error that occurred failing a S3 multipart upload, +// and a upload ID is available. If an uploadID is not available a more relevant +type multiUploadError struct { + awsError + + // ID for multipart upload which failed. + uploadID string +} + +// Error returns the string representation of the error. +// +// # See apierr.BaseError ErrorWithExtra for output format +// +// Satisfies the error interface. +func (m multiUploadError) Error() string { + extra := fmt.Sprintf("upload id: %s", m.uploadID) + return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr()) +} + +// String returns the string representation of the error. 
+// Alias for Error to satisfy the stringer interface. +func (m multiUploadError) String() string { + return m.Error() +} + +// UploadID returns the id of the S3 upload which failed. +func (m multiUploadError) UploadID() string { + return m.uploadID +} + +// UploadOutput represents a response from the Upload() call. +type UploadOutput struct { + // The URL where the object was uploaded to. + Location string + + // The version of the object that was uploaded. Will only be populated if + // the S3 Bucket is versioned. If the bucket is not versioned this field + // will not be set. + VersionID *string + + // The ID for a multipart upload to S3. In the case of an error the error + // can be cast to the MultiUploadFailure interface to extract the upload ID. + UploadID string + + // Entity tag of the object. + ETag *string +} + +// WithUploaderRequestOptions appends to the Uploader's API request options. +func WithUploaderRequestOptions(opts ...request.Option) func(*Uploader) { + return func(u *Uploader) { + u.RequestOptions = append(u.RequestOptions, opts...) + } +} + +// The Uploader structure that calls Upload(). It is safe to call Upload() +// on this structure for multiple objects and across concurrent goroutines. +// Mutating the Uploader's properties is not safe to be done concurrently. +// +// The ContentMD5 member for pre-computed MD5 checksums will be ignored for +// multipart uploads. Objects that will be uploaded in a single part, the +// ContentMD5 will be used. +// +// The Checksum members for pre-computed checksums will be ignored for +// multipart uploads. Objects that will be uploaded in a single part, will +// include the checksum member in the request. +type Uploader struct { + // The buffer size (in bytes) to use when buffering data into chunks and + // sending them as parts to S3. The minimum allowed part size is 5MB, and + // if this value is set to zero, the DefaultUploadPartSize value will be used. + PartSize int64 + + // The number of goroutines to spin up in parallel per call to Upload when + // sending parts. If this is set to zero, the DefaultUploadConcurrency value + // will be used. + // + // The concurrency pool is not shared between calls to Upload. + Concurrency int + + // Setting this value to true will cause the SDK to avoid calling + // AbortMultipartUpload on a failure, leaving all successfully uploaded + // parts on S3 for manual recovery. + // + // Note that storing parts of an incomplete multipart upload counts towards + // space usage on S3 and will add additional costs if not cleaned up. + LeavePartsOnError bool + + // MaxUploadParts is the max number of parts which will be uploaded to S3. + // Will be used to calculate the partsize of the object to be uploaded. + // E.g: 5GB file, with MaxUploadParts set to 100, will upload the file + // as 100, 50MB parts. With a limited of s3.MaxUploadParts (10,000 parts). + // + // MaxUploadParts must not be used to limit the total number of bytes uploaded. + // Use a type like to io.LimitReader (https://golang.org/pkg/io/#LimitedReader) + // instead. An io.LimitReader is helpful when uploading an unbounded reader + // to S3, and you know its maximum size. Otherwise the reader's io.EOF returned + // error must be used to signal end of stream. + // + // Defaults to package const's MaxUploadParts value. + MaxUploadParts int + + // The client to use when uploading to S3. + S3 s3iface.S3API + + // List of request options that will be passed down to individual API + // operation requests made by the uploader. 
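+	//
+	// For example, request.WithLogLevel(aws.LogDebug) could be appended
+	// here to log each underlying API call made by the uploader.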
+ RequestOptions []request.Option + + // Defines the buffer strategy used when uploading a part + BufferProvider ReadSeekerWriteToProvider + + // partPool allows for the re-usage of streaming payload part buffers between upload calls + partPool byteSlicePool +} + +// NewUploader creates a new Uploader instance to upload objects to S3. Pass In +// additional functional options to customize the uploader's behavior. Requires a +// client.ConfigProvider in order to create a S3 service client. The session.Session +// satisfies the client.ConfigProvider interface. +// +// Example: +// +// // The session the S3 Uploader will use +// sess := session.Must(session.NewSession()) +// +// // Create an uploader with the session and default options +// uploader := s3manager.NewUploader(sess) +// +// // Create an uploader with the session and custom options +// uploader := s3manager.NewUploader(session, func(u *s3manager.Uploader) { +// u.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader { + return newUploader(s3.New(c), options...) +} + +func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader { + u := &Uploader{ + S3: client, + PartSize: DefaultUploadPartSize, + Concurrency: DefaultUploadConcurrency, + LeavePartsOnError: false, + MaxUploadParts: MaxUploadParts, + BufferProvider: defaultUploadBufferProvider(), + } + + for _, option := range options { + option(u) + } + + u.partPool = newByteSlicePool(u.PartSize) + + return u +} + +// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in +// additional functional options to customize the uploader's behavior. Requires +// a S3 service client to make S3 API calls. +// +// Example: +// +// // The session the S3 Uploader will use +// sess := session.Must(session.NewSession()) +// +// // S3 service client the Upload manager will use. +// s3Svc := s3.New(sess) +// +// // Create an uploader with S3 client and default options +// uploader := s3manager.NewUploaderWithClient(s3Svc) +// +// // Create an uploader with S3 client and custom options +// uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) { +// u.PartSize = 64 * 1024 * 1024 // 64MB per part +// }) +func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader { + return newUploader(svc, options...) +} + +// Upload uploads an object to S3, intelligently buffering large files into +// smaller chunks and sending them in parallel across multiple goroutines. You +// can configure the buffer size and concurrency through the Uploader's parameters. +// +// Additional functional options can be provided to configure the individual +// upload. These options are copies of the Uploader instance Upload is called from. +// Modifying the options will not impact the original Uploader instance. +// +// Use the WithUploaderRequestOptions helper function to pass in request +// options that will be applied to all API operations made with this uploader. +// +// It is safe to call this method concurrently across goroutines. +// +// Example: +// +// // Upload input parameters +// upParams := &s3manager.UploadInput{ +// Bucket: &bucketName, +// Key: &keyName, +// Body: file, +// } +// +// // Perform an upload. +// result, err := uploader.Upload(upParams) +// +// // Perform upload with options different than the those in the Uploader. 
+// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) { +// u.PartSize = 10 * 1024 * 1024 // 10MB part size +// u.LeavePartsOnError = true // Don't delete the parts if the upload fails. +// }) +func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) { + return u.UploadWithContext(aws.BackgroundContext(), input, options...) +} + +// UploadWithContext uploads an object to S3, intelligently buffering large +// files into smaller chunks and sending them in parallel across multiple +// goroutines. You can configure the buffer size and concurrency through the +// Uploader's parameters. +// +// UploadWithContext is the same as Upload with the additional support for +// Context input parameters. The Context must not be nil. A nil Context will +// cause a panic. Use the context to add deadlining, timeouts, etc. The +// UploadWithContext may create sub-contexts for individual underlying requests. +// +// Additional functional options can be provided to configure the individual +// upload. These options are copies of the Uploader instance Upload is called from. +// Modifying the options will not impact the original Uploader instance. +// +// Use the WithUploaderRequestOptions helper function to pass in request +// options that will be applied to all API operations made with this uploader. +// +// It is safe to call this method concurrently across goroutines. +func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ...func(*Uploader)) (*UploadOutput, error) { + i := uploader{in: input, cfg: u, ctx: ctx} + + for _, opt := range opts { + opt(&i.cfg) + } + + i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager")) + + return i.upload() +} + +// UploadWithIterator will upload a batched amount of objects to S3. This operation uses +// the iterator pattern to know which object to upload next. Since this is an interface this +// allows for custom defined functionality. +// +// Example: +// +// svc:= s3manager.NewUploader(sess) +// +// objects := []BatchUploadObject{ +// { +// Object: &s3manager.UploadInput { +// Key: aws.String("key"), +// Bucket: aws.String("bucket"), +// }, +// }, +// } +// +// iter := &s3manager.UploadObjectsIterator{Objects: objects} +// if err := svc.UploadWithIterator(aws.BackgroundContext(), iter); err != nil { +// return err +// } +func (u Uploader) UploadWithIterator(ctx aws.Context, iter BatchUploadIterator, opts ...func(*Uploader)) error { + var errs []Error + for iter.Next() { + object := iter.UploadObject() + if _, err := u.UploadWithContext(ctx, object.Object, opts...); err != nil { + s3Err := Error{ + OrigErr: err, + Bucket: object.Object.Bucket, + Key: object.Object.Key, + } + + errs = append(errs, s3Err) + } + + if object.After == nil { + continue + } + + if err := object.After(); err != nil { + s3Err := Error{ + OrigErr: err, + Bucket: object.Object.Bucket, + Key: object.Object.Key, + } + + errs = append(errs, s3Err) + } + } + + if len(errs) > 0 { + return NewBatchError("BatchedUploadIncomplete", "some objects have failed to upload.", errs) + } + return nil +} + +// internal structure to manage an upload to S3. +type uploader struct { + ctx aws.Context + cfg Uploader + + in *UploadInput + + readerPos int64 // current reader position + totalSize int64 // set to -1 if the size is not known +} + +// internal logic for deciding whether to upload a single part or use a +// multipart upload. 
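+// For example, with the default 5 MiB part size, a 3 MiB body is sent as a
+// single PutObject request, while an 8 MiB body becomes a multipart upload.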
+func (u *uploader) upload() (*UploadOutput, error) { + if err := u.init(); err != nil { + return nil, awserr.New("ReadRequestBody", "unable to initialize upload", err) + } + defer u.cfg.partPool.Close() + + if u.cfg.PartSize < MinUploadPartSize { + msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize) + return nil, awserr.New("ConfigError", msg, nil) + } + + // Do one read to determine if we have more than one part + reader, _, cleanup, err := u.nextReader() + if err == io.EOF { // single part + return u.singlePart(reader, cleanup) + } else if err != nil { + cleanup() + return nil, awserr.New("ReadRequestBody", "read upload data failed", err) + } + + mu := multiuploader{uploader: u} + return mu.upload(reader, cleanup) +} + +// init will initialize all default options. +func (u *uploader) init() error { + if err := validateSupportedARNType(aws.StringValue(u.in.Bucket)); err != nil { + return err + } + + if u.cfg.Concurrency == 0 { + u.cfg.Concurrency = DefaultUploadConcurrency + } + if u.cfg.PartSize == 0 { + u.cfg.PartSize = DefaultUploadPartSize + } + if u.cfg.MaxUploadParts == 0 { + u.cfg.MaxUploadParts = MaxUploadParts + } + + // Try to get the total size for some optimizations + if err := u.initSize(); err != nil { + return err + } + + // If PartSize was changed or partPool was never setup then we need to allocated a new pool + // so that we return []byte slices of the correct size + poolCap := u.cfg.Concurrency + 1 + if u.cfg.partPool == nil || u.cfg.partPool.SliceSize() != u.cfg.PartSize { + u.cfg.partPool = newByteSlicePool(u.cfg.PartSize) + u.cfg.partPool.ModifyCapacity(poolCap) + } else { + u.cfg.partPool = &returnCapacityPoolCloser{byteSlicePool: u.cfg.partPool} + u.cfg.partPool.ModifyCapacity(poolCap) + } + + return nil +} + +// initSize tries to detect the total stream size, setting u.totalSize. If +// the size is not known, totalSize is set to -1. +func (u *uploader) initSize() error { + u.totalSize = -1 + + switch r := u.in.Body.(type) { + case io.Seeker: + n, err := aws.SeekerLen(r) + if err != nil { + return err + } + u.totalSize = n + + // Try to adjust partSize if it is too small and account for + // integer division truncation. + if u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) { + // Add one to the part size to account for remainders + // during the size calculation. e.g odd number of bytes. + u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1 + } + } + + return nil +} + +// nextReader returns a seekable reader representing the next packet of data. +// This operation increases the shared u.readerPos counter, but note that it +// does not need to be wrapped in a mutex because nextReader is only called +// from the main thread. 
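+// For example, with a 5 MiB part size and a seekable 12 MiB body, successive
+// calls return section readers covering bytes [0, 5 MiB), [5 MiB, 10 MiB),
+// and [10 MiB, 12 MiB), with the final call also returning io.EOF.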
+func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) { + switch r := u.in.Body.(type) { + case readerAtSeeker: + var err error + + n := u.cfg.PartSize + if u.totalSize >= 0 { + bytesLeft := u.totalSize - u.readerPos + + if bytesLeft <= u.cfg.PartSize { + err = io.EOF + n = bytesLeft + } + } + + var ( + reader io.ReadSeeker + cleanup func() + ) + + reader = io.NewSectionReader(r, u.readerPos, n) + if u.cfg.BufferProvider != nil { + reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader) + } else { + cleanup = func() {} + } + + u.readerPos += n + + return reader, int(n), cleanup, err + + default: + part, err := u.cfg.partPool.Get(u.ctx) + if err != nil { + return nil, 0, func() {}, err + } + + n, err := readFillBuf(r, *part) + u.readerPos += int64(n) + + cleanup := func() { + u.cfg.partPool.Put(part) + } + + return bytes.NewReader((*part)[0:n]), n, cleanup, err + } +} + +func readFillBuf(r io.Reader, b []byte) (offset int, err error) { + for offset < len(b) && err == nil { + var n int + n, err = r.Read(b[offset:]) + offset += n + } + + return offset, err +} + +// singlePart contains upload logic for uploading a single chunk via +// a regular PutObject request. Multipart requests require at least two +// parts, or at least 5MB of data. +func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) { + defer cleanup() + + params := &s3.PutObjectInput{} + awsutil.Copy(params, u.in) + params.Body = r + + // Need to use request form because URL generated in request is + // used in return. + req, out := u.cfg.S3.PutObjectRequest(params) + req.SetContext(u.ctx) + req.ApplyOptions(u.cfg.RequestOptions...) + if err := req.Send(); err != nil { + return nil, err + } + + url := req.HTTPRequest.URL.String() + return &UploadOutput{ + Location: url, + VersionID: out.VersionId, + ETag: out.ETag, + }, nil +} + +// internal structure to manage a specific multipart upload to S3. +type multiuploader struct { + *uploader + wg sync.WaitGroup + m sync.Mutex + err error + uploadID string + parts completedParts +} + +// keeps track of a single chunk of data being sent to S3. +type chunk struct { + buf io.ReadSeeker + num int64 + cleanup func() +} + +// completedParts is a wrapper to make parts sortable by their part number, +// since S3 required this list to be sent in sorted order. +type completedParts []*s3.CompletedPart + +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber } + +// upload will perform a multipart upload using the firstBuf buffer containing +// the first chunk of data. +func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) { + params := &s3.CreateMultipartUploadInput{} + awsutil.Copy(params, u.in) + + // Create the multipart + resp, err := u.cfg.S3.CreateMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...) 
+ if err != nil { + cleanup() + return nil, err + } + u.uploadID = *resp.UploadId + + // Create the workers + ch := make(chan chunk, u.cfg.Concurrency) + for i := 0; i < u.cfg.Concurrency; i++ { + u.wg.Add(1) + go u.readChunk(ch) + } + + // Send part 1 to the workers + var num int64 = 1 + ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup} + + // Read and queue the rest of the parts + for u.geterr() == nil && err == nil { + var ( + reader io.ReadSeeker + nextChunkLen int + ok bool + ) + + reader, nextChunkLen, cleanup, err = u.nextReader() + ok, err = u.shouldContinue(num, nextChunkLen, err) + if !ok { + cleanup() + if err != nil { + u.seterr(err) + } + break + } + + num++ + + ch <- chunk{buf: reader, num: num, cleanup: cleanup} + } + + // Close the channel, wait for workers, and complete upload + close(ch) + u.wg.Wait() + complete := u.complete() + + if err := u.geterr(); err != nil { + return nil, &multiUploadError{ + awsError: awserr.New( + "MultipartUpload", + "upload multipart failed", + err), + uploadID: u.uploadID, + } + } + + // Create a presigned URL of the S3 Get Object in order to have parity with + // single part upload. + getReq, _ := u.cfg.S3.GetObjectRequest(&s3.GetObjectInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + }) + getReq.Config.Credentials = credentials.AnonymousCredentials + getReq.SetContext(u.ctx) + uploadLocation, _, _ := getReq.PresignRequest(1) + + return &UploadOutput{ + Location: uploadLocation, + VersionID: complete.VersionId, + UploadID: u.uploadID, + ETag: complete.ETag, + }, nil +} + +func (u *multiuploader) shouldContinue(part int64, nextChunkLen int, err error) (bool, error) { + if err != nil && err != io.EOF { + return false, awserr.New("ReadRequestBody", "read multipart upload data failed", err) + } + + if nextChunkLen == 0 { + // No need to upload empty part, if file was empty to start + // with empty single part would of been created and never + // started multipart upload. + return false, nil + } + + part++ + // This upload exceeded maximum number of supported parts, error now. + if part > int64(u.cfg.MaxUploadParts) || part > int64(MaxUploadParts) { + var msg string + if part > int64(u.cfg.MaxUploadParts) { + msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit", + u.cfg.MaxUploadParts) + } else { + msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit", + MaxUploadParts) + } + return false, awserr.New("TotalPartsExceeded", msg, nil) + } + + return true, err +} + +// readChunk runs in worker goroutines to pull chunks off of the ch channel +// and send() them as UploadPart requests. +func (u *multiuploader) readChunk(ch chan chunk) { + defer u.wg.Done() + for { + data, ok := <-ch + + if !ok { + break + } + + if u.geterr() == nil { + if err := u.send(data); err != nil { + u.seterr(err) + } + } + + data.cleanup() + } +} + +// send performs an UploadPart request and keeps track of the completed +// part information. +func (u *multiuploader) send(c chunk) error { + params := &s3.UploadPartInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + Body: c.buf, + UploadId: &u.uploadID, + SSECustomerAlgorithm: u.in.SSECustomerAlgorithm, + SSECustomerKey: u.in.SSECustomerKey, + PartNumber: &c.num, + } + + resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...) 
+ if err != nil { + return err + } + + n := c.num + completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n} + + u.m.Lock() + u.parts = append(u.parts, completed) + u.m.Unlock() + + return nil +} + +// geterr is a thread-safe getter for the error object +func (u *multiuploader) geterr() error { + u.m.Lock() + defer u.m.Unlock() + + return u.err +} + +// seterr is a thread-safe setter for the error object +func (u *multiuploader) seterr(e error) { + u.m.Lock() + defer u.m.Unlock() + + u.err = e +} + +// fail will abort the multipart unless LeavePartsOnError is set to true. +func (u *multiuploader) fail() { + if u.cfg.LeavePartsOnError { + return + } + + params := &s3.AbortMultipartUploadInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + UploadId: &u.uploadID, + } + _, err := u.cfg.S3.AbortMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...) + if err != nil { + logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err)) + } +} + +// complete successfully completes a multipart upload and returns the response. +func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput { + if u.geterr() != nil { + u.fail() + return nil + } + + // Parts must be sorted in PartNumber order. + sort.Sort(u.parts) + + params := &s3.CompleteMultipartUploadInput{ + Bucket: u.in.Bucket, + Key: u.in.Key, + UploadId: &u.uploadID, + MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts}, + } + resp, err := u.cfg.S3.CompleteMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...) + if err != nil { + u.seterr(err) + u.fail() + } + + return resp +} + +type readerAtSeeker interface { + io.ReaderAt + io.ReadSeeker +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go new file mode 100644 index 000000000..f9c6e786d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go @@ -0,0 +1,380 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3manager + +import ( + "io" + "time" +) + +// UploadInput provides the input parameters for uploading a stream or buffer +// to an object in an Amazon S3 bucket. This type is similar to the s3 +// package's PutObjectInput with the exception that the Body member is an +// io.Reader instead of an io.ReadSeeker. +// +// The ContentMD5 member for pre-computed MD5 checksums will be ignored for +// multipart uploads. Objects that will be uploaded in a single part, the +// ContentMD5 will be used. +// +// The Checksum members for pre-computed checksums will be ignored for +// multipart uploads. Objects that will be uploaded in a single part, will +// include the checksum member in the request. +type UploadInput struct { + _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // in the Amazon S3 User Guide. + // + // When adding a new object, you can use headers to grant ACL-based permissions + // to individual Amazon Web Services accounts or to predefined groups defined + // by Amazon S3. These permissions are then added to the ACL on the object. + // By default, all objects are private. Only the owner has full access control. 
+ // For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html) + // in the Amazon S3 User Guide. + // + // If the bucket that you're uploading objects to uses the bucket owner enforced + // setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. + // Buckets that use this setting only accept PUT requests that don't specify + // an ACL or PUT requests that specify bucket owner full control ACLs, such + // as the bucket-owner-full-control canned ACL or an equivalent form of this + // ACL expressed in the XML format. PUT requests that contain other ACLs (for + // example, custom grants to certain Amazon Web Services accounts) fail and + // return a 400 error with the error code AccessControlListNotSupported. For + // more information, see Controlling ownership of objects and disabling ACLs + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // The readable body payload to send to S3. + Body io.Reader + + // The bucket name to which the PUT action was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? 
+	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
+	// with server-side encryption using Key Management Service (KMS) keys (SSE-KMS).
+	// Setting this header to true causes Amazon S3 to use an S3 Bucket Key for
+	// object encryption with SSE-KMS.
+	//
+	// Specifying this header with a PUT action doesn’t affect bucket-level settings
+	// for S3 Bucket Key.
+	//
+	// This functionality is not supported for directory buckets.
+	BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
+
+	// Can be used to specify caching behavior along the request/reply chain. For
+	// more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
+	// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9).
+	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+	// Indicates the algorithm used to create the checksum for the object when you
+	// use the SDK. This header will not provide any additional functionality if
+	// you don't use the SDK. When you send this header, there must be a corresponding
+	// x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon
+	// S3 fails the request with the HTTP status code 400 Bad Request.
+	//
+	// For the x-amz-checksum-algorithm header, replace algorithm with the supported
+	// algorithm from the following list:
+	//
+	//    * CRC32
+	//
+	//    * CRC32C
+	//
+	//    * SHA1
+	//
+	//    * SHA256
+	//
+	// For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If the individual checksum value you provide through x-amz-checksum-algorithm
+	// doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm,
+	// Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum
+	// algorithm that matches the provided value in x-amz-checksum-algorithm.
+	//
+	// For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the
+	// default checksum algorithm that's used for performance.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If a
+	// value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// This header can be used as a data integrity check to verify that the data
+	// received is the same data that was originally sent. This header specifies
+	// the base64-encoded, 32-bit CRC32 checksum of the object. For more information,
+	// see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"`
+
+	// This header can be used as a data integrity check to verify that the data
+	// received is the same data that was originally sent. This header specifies
+	// the base64-encoded, 32-bit CRC32C checksum of the object. For more information,
+	// see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"`
+
+	// This header can be used as a data integrity check to verify that the data
+	// received is the same data that was originally sent. This header specifies
+	// the base64-encoded, 160-bit SHA-1 digest of the object. For more information,
+	// see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"`
+
+	// This header can be used as a data integrity check to verify that the data
+	// received is the same data that was originally sent. This header specifies
+	// the base64-encoded, 256-bit SHA-256 digest of the object. For more information,
+	// see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"`
+
+	// Specifies presentational information for the object. For more information,
+	// see https://www.rfc-editor.org/rfc/rfc6266#section-4 (https://www.rfc-editor.org/rfc/rfc6266#section-4).
+	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+	// Specifies what content encodings have been applied to the object and thus
+	// what decoding mechanisms must be applied to obtain the media-type referenced
+	// by the Content-Type header field. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding
+	// (https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding).
+	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+	// The language the content is in.
+	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+	// The base64-encoded 128-bit MD5 digest of the message (without the headers)
+	// according to RFC 1864. This header can be used as a message integrity check
+	// to verify that the data is the same data that was originally sent. Although
+	// it is optional, we recommend using the Content-MD5 mechanism as an end-to-end
+	// integrity check. For more information about REST request authentication,
+	// see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
+	//
+	// The Content-MD5 header is required for any request to upload an object with
+	// a retention period configured using Amazon S3 Object Lock. For more information
+	// about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)
+	// in the Amazon S3 User Guide.
+	//
+	// This functionality is not supported for directory buckets.
+	//
+	// If the ContentMD5 is provided for a multipart upload, it will be ignored.
+	// For objects uploaded in a single part, the ContentMD5 will be used.
+	ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
+
+	// A standard MIME type describing the format of the contents.
For more information, + // see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type). + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The date and time at which the object is no longer cacheable. For more information, + // see https://www.rfc-editor.org/rfc/rfc7234#section-5.3 (https://www.rfc-editor.org/rfc/rfc7234#section-5.3). + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Object key for which the PUT action was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Specifies whether a legal hold will be applied to this object. For more information + // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode that you want to apply to this object. + // + // This functionality is not supported for directory buckets. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when you want this object's Object Lock to expire. Must + // be formatted as a timestamp parameter. + // + // This functionality is not supported for directory buckets. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // Confirms that the requester knows that they will be charged for the request. 
+	// Bucket owners need not specify this parameter in their requests. If either
+	// the source or destination S3 bucket has Requester Pays enabled, the requester
+	// will pay for corresponding charges to copy the object. For information about
+	// downloading objects from Requester Pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 User Guide.
+	//
+	// This functionality is not supported for directory buckets.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (for example, AES256).
+	//
+	// This functionality is not supported for directory buckets.
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// S3 does not store the encryption key. The key must be appropriate for use
+	// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	//
+	// This functionality is not supported for directory buckets.
+	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure that the
+	// encryption key was transmitted without error.
+	//
+	// This functionality is not supported for directory buckets.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Specifies the Amazon Web Services KMS Encryption Context to use for object
+	// encryption. The value of this header is a base64-encoded UTF-8 string holding
+	// JSON with the encryption context key-value pairs. This value is stored as
+	// object metadata and automatically gets passed on to Amazon Web Services KMS
+	// for future GetObject or CopyObject operations on this object. This value
+	// must be explicitly added during CopyObject operations.
+	//
+	// This functionality is not supported for directory buckets.
+	SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+	// If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse,
+	// this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management
+	// Service (KMS) symmetric encryption customer managed key that was used for
+	// the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse,
+	// but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3
+	// uses the Amazon Web Services managed key (aws/s3) to protect the data. If
+	// the KMS key does not exist in the same account that's issuing the command,
+	// you must use the full ARN and not just the ID.
+	//
+	// This functionality is not supported for directory buckets.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm that was used when you store this object + // in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // General purpose buckets - You have four mutually exclusive options to protect + // data using server-side encryption in Amazon S3, depending on how you choose + // to manage the encryption keys. Specifically, the encryption key options are + // Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or + // DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with + // server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. + // You can optionally tell Amazon S3 to encrypt data at rest by using server-side + // encryption with other key options. For more information, see Using Server-Side + // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) + // in the Amazon S3 User Guide. + // + // Directory buckets - For directory buckets, only the server-side encryption + // with Amazon S3 managed keys (SSE-S3) (AES256) value is supported. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // By default, Amazon S3 uses the STANDARD Storage Class to store newly created + // objects. The STANDARD storage class provides high durability and high availability. + // Depending on performance needs, you can specify a different Storage Class. + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 User Guide. + // + // * For directory buckets, only the S3 Express One Zone storage class is + // supported to store newly created objects. + // + // * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // (For example, "Key1=Value1") + // + // This functionality is not supported for directory buckets. + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. For information about object + // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html) + // in the Amazon S3 User Guide. + // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see Hosting Websites + // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. 
+	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go
new file mode 100644
index 000000000..a74a02d93
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go
@@ -0,0 +1,83 @@
+package s3manager
+
+import (
+	"bufio"
+	"io"
+	"sync"
+
+	"github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom
+type WriterReadFrom interface {
+	io.Writer
+	io.ReaderFrom
+}
+
+// WriterReadFromProvider provides an implementation of io.ReaderFrom for the given io.Writer
+type WriterReadFromProvider interface {
+	GetReadFrom(writer io.Writer) (w WriterReadFrom, cleanup func())
+}
+
+type bufferedWriter interface {
+	WriterReadFrom
+	Flush() error
+	Reset(io.Writer)
+}
+
+type bufferedReadFrom struct {
+	bufferedWriter
+}
+
+func (b *bufferedReadFrom) ReadFrom(r io.Reader) (int64, error) {
+	n, err := b.bufferedWriter.ReadFrom(r)
+	if flushErr := b.Flush(); flushErr != nil && err == nil {
+		err = flushErr
+	}
+	return n, err
+}
+
+// PooledBufferedReadFromProvider is a WriterReadFromProvider that uses a sync.Pool
+// to manage allocation and reuse of *bufio.Writer structures.
+type PooledBufferedReadFromProvider struct {
+	pool sync.Pool
+}
+
+// NewPooledBufferedWriterReadFromProvider returns a new PooledBufferedReadFromProvider.
+// Size is used to control the size of the underlying *bufio.Writer created for
+// calls to GetReadFrom.
+func NewPooledBufferedWriterReadFromProvider(size int) *PooledBufferedReadFromProvider {
+	if size < int(32*sdkio.KibiByte) {
+		size = int(64 * sdkio.KibiByte)
+	}
+
+	return &PooledBufferedReadFromProvider{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return &bufferedReadFrom{bufferedWriter: bufio.NewWriterSize(nil, size)}
+			},
+		},
+	}
+}
+
+// GetReadFrom takes an io.Writer and wraps it with a type which satisfies the WriterReadFrom
+// interface. Additionally, a cleanup function is provided which must be called after usage of the
+// WriterReadFrom has been completed in order to allow the reuse of the *bufio.Writer.
+func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r WriterReadFrom, cleanup func()) {
+	buffer := p.pool.Get().(*bufferedReadFrom)
+	buffer.Reset(writer)
+	r = buffer
+	cleanup = func() {
+		buffer.Reset(nil) // Reset to nil writer to release reference
+		p.pool.Put(buffer)
+	}
+	return r, cleanup
+}
+
+type suppressWriterAt struct {
+	suppressed io.Reader
+}
+
+func (s *suppressWriterAt) Read(p []byte) (n int, err error) {
+	return s.suppressed.Read(p)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
new file mode 100644
index 000000000..3e75d0e94
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -0,0 +1,108 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/restxml"
+)
+
+// S3 provides the API operation methods for making requests to
+// Amazon Simple Storage Service.
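The pooled provider defined in writer_read_from.go above is the piece a caller plugs into the s3manager Downloader through its BufferProvider field. A minimal sketch of that wiring, assuming standard session setup; the bucket, key, and destination path are placeholders:

```go
package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession())

	// Reuse 5 MiB bufio.Writers across downloads via the sync.Pool-backed provider.
	downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
		d.BufferProvider = s3manager.NewPooledBufferedWriterReadFromProvider(5 * 1024 * 1024)
	})

	f, err := os.Create("/tmp/object") // placeholder destination
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if _, err := downloader.Download(f, &s3.GetObjectInput{
		Bucket: aws.String("my-bucket"), // placeholder
		Key:    aws.String("my-key"),    // placeholder
	}); err != nil {
		log.Fatal(err)
	}
}
```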
+// See this package's package overview docs
+// for details on the service.
+//
+// S3 methods are safe to use concurrently. It is not safe to
+// modify any of the struct's properties though.
+type S3 struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "s3" // Name of service.
+	EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID   = "S3" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the S3 client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//
+//	mySession := session.Must(session.NewSession())
+//
+//	// Create an S3 client from just a session.
+//	svc := s3.New(mySession)
+//
+//	// Create an S3 client with additional configuration
+//	svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	if c.SigningNameDerived || len(c.SigningName) == 0 {
+		c.SigningName = "s3"
+	}
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *S3 {
+	svc := &S3{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:    ServiceName,
+				ServiceID:      ServiceID,
+				SigningName:    signingName,
+				SigningRegion:  signingRegion,
+				PartitionID:    partitionID,
+				Endpoint:       endpoint,
+				APIVersion:     "2006-03-01",
+				ResolvedRegion: resolvedRegion,
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) {
+		s.DisableURIPathEscaping = true
+	}))
+	svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
+
+	svc.Handlers.BuildStream.PushBackNamed(restxml.BuildHandler)
+	svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an S3 operation and runs any
+// custom request initialization.
+func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
new file mode 100644
index 000000000..57a0bd92c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
@@ -0,0 +1,84 @@
+package s3
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
+
+func validateSSERequiresSSL(r *request.Request) {
+	if r.HTTPRequest.URL.Scheme == "https" {
+		return
+	}
+
+	if iface, ok := r.Params.(sseCustomerKeyGetter); ok {
+		if len(iface.getSSECustomerKey()) > 0 {
+			r.Error = errSSERequiresSSL
+			return
+		}
+	}
+
+	if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
+		if len(iface.getCopySourceSSECustomerKey()) > 0 {
+			r.Error = errSSERequiresSSL
+			return
+		}
+	}
+}
+
+const (
+	sseKeyHeader    = "x-amz-server-side-encryption-customer-key"
+	sseKeyMD5Header = sseKeyHeader + "-md5"
+)
+
+func computeSSEKeyMD5(r *request.Request) {
+	var key string
+	if g, ok := r.Params.(sseCustomerKeyGetter); ok {
+		key = g.getSSECustomerKey()
+	}
+
+	computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest)
+}
+
+const (
+	copySrcSSEKeyHeader    = "x-amz-copy-source-server-side-encryption-customer-key"
+	copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5"
+)
+
+func computeCopySourceSSEKeyMD5(r *request.Request) {
+	var key string
+	if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
+		key = g.getCopySourceSSECustomerKey()
+	}
+
+	computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest)
+}
+
+func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) {
+	if len(key) == 0 {
+		// Backwards compatibility where the user just set the header value instead
+		// of using the API parameter, or set the header value for an
+		// operation without the parameters modeled.
+		key = r.Header.Get(keyHeader)
+		if len(key) == 0 {
+			return
+		}
+
+		// In the backwards-compatible case, the header's value is not base64
+		// encoded, and needs to be encoded and updated by the SDK's customizations.
+		b64Key := base64.StdEncoding.EncodeToString([]byte(key))
+		r.Header.Set(keyHeader, b64Key)
+	}
+
+	// Only update Key's MD5 if not already set.
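+	// That is, for a raw key K the header value set below is base64(md5(K)),
+	// which S3 uses to verify that the customer-provided encryption key was
+	// transmitted without error.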
+ if len(r.Header.Get(keyMD5Header)) == 0 { + sum := md5.Sum([]byte(key)) + keyMD5 := base64.StdEncoding.EncodeToString(sum[:]) + r.Header.Set(keyMD5Header, keyMD5) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go new file mode 100644 index 000000000..096adc091 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -0,0 +1,47 @@ +package s3 + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +func copyMultipartStatusOKUnmarshalError(r *request.Request) { + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to read response body", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + // Note, some middleware later in the stack like restxml.Unmarshal expect a valid, non-closed Body + // even in case of an error, so we replace it with an empty Reader. + r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewBuffer(nil)) + return + } + + body := bytes.NewReader(b) + r.HTTPResponse.Body = ioutil.NopCloser(body) + defer body.Seek(0, sdkio.SeekStart) + + unmarshalError(r) + if err, ok := r.Error.(awserr.Error); ok && err != nil { + if err.Code() == request.ErrCodeSerialization && + err.OrigErr() != io.EOF { + r.Error = nil + return + } + // if empty payload + if err.OrigErr() == io.EOF { + r.HTTPResponse.StatusCode = http.StatusInternalServerError + } else { + r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go new file mode 100644 index 000000000..6eecf6691 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -0,0 +1,114 @@ +package s3 + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) + + // Bucket exists in a different region, and request needs + // to be made to the correct region. 
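+	// For example, a request sent to us-east-1 for a bucket that lives in
+	// eu-west-1 is answered with HTTP 301 and usually carries an
+	// x-amz-bucket-region: eu-west-1 header; that hint is folded into the
+	// BucketRegionError message below.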
+	if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
+		msg := fmt.Sprintf(
+			"incorrect region, the bucket is not in '%s' region at endpoint '%s'",
+			aws.StringValue(r.Config.Region),
+			aws.StringValue(r.Config.Endpoint),
+		)
+		if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 {
+			msg += fmt.Sprintf(", bucket is in '%s' region", v)
+		}
+		r.Error = awserr.NewRequestFailure(
+			awserr.New("BucketRegionError", msg, nil),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	// Attempt to parse error from body if it is known
+	var errResp xmlErrorResponse
+	var err error
+	if r.HTTPResponse.StatusCode >= 200 && r.HTTPResponse.StatusCode < 300 {
+		err = s3unmarshalXMLError(&errResp, r.HTTPResponse.Body)
+	} else {
+		err = xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body)
+	}
+
+	if err != nil {
+		var errorMsg string
+		if err == io.EOF {
+			errorMsg = "empty response payload"
+		} else {
+			errorMsg = "failed to unmarshal error message"
+		}
+
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				errorMsg, err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	// Fallback to status code converted to message if still no error code
+	if len(errResp.Code) == 0 {
+		statusText := http.StatusText(r.HTTPResponse.StatusCode)
+		errResp.Code = strings.Replace(statusText, " ", "", -1)
+		errResp.Message = statusText
+	}
+
+	r.Error = awserr.NewRequestFailure(
+		awserr.New(errResp.Code, errResp.Message, err),
+		r.HTTPResponse.StatusCode,
+		r.RequestID,
+	)
+}
+
+// A RequestFailure provides access to the S3 Request ID and Host ID values
+// returned from API operation errors. Getting the error as a string will
+// return the formatted error with the same information as awserr.RequestFailure,
+// while also adding the HostID value from the response.
+type RequestFailure interface {
+	awserr.RequestFailure
+
+	// HostID is the S3 Host ID needed for debugging and for contacting support
+	HostID() string
+}
+
+// s3unmarshalXMLError is an S3-specific XML error unmarshaler for 200 OK
+// errors and response payloads. It differs from xmlutil.UnmarshalXMLError
+// in that it does not ignore the EOF error and passes it up.
+// Related to bug fix for `s3 200 OK response with empty payload`
+func s3unmarshalXMLError(v interface{}, stream io.Reader) error {
+	var errBuf bytes.Buffer
+	body := io.TeeReader(stream, &errBuf)
+
+	err := xml.NewDecoder(body).Decode(v)
+	if err != nil && err != io.EOF {
+		return awserr.NewUnmarshalError(err,
+			"failed to unmarshal error message", errBuf.Bytes())
+	}
+
+	return err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
new file mode 100644
index 000000000..2596c694b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
@@ -0,0 +1,214 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// WaitUntilBucketExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
+	return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists.
+// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 301, + }, + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 403, + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilBucketNotExists uses the Amazon S3 API operation +// HeadBucket to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { + return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilObjectExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { + return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists. 
+// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilObjectNotExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { + return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go new file mode 100644 index 000000000..b8f590f71 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go @@ -0,0 +1,1367 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
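The waiters above poll HeadBucket or HeadObject with a constant five-second delay for up to 20 attempts; both knobs can be overridden per call via request.WaiterOption values. A minimal sketch, assuming an established session (the bucket name is a placeholder):

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Bound the total wait with a context deadline in addition to the
	// waiter's own attempt limit.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	err := svc.WaitUntilBucketExistsWithContext(ctx,
		&s3.HeadBucketInput{Bucket: aws.String("my-bucket")}, // placeholder
		request.WithWaiterDelay(request.ConstantWaiterDelay(10*time.Second)),
		request.WithWaiterMaxAttempts(12),
	)
	if err != nil {
		log.Fatal(err)
	}
}
```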
+ +package sso + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opGetRoleCredentials = "GetRoleCredentials" + +// GetRoleCredentialsRequest generates a "aws/request.Request" representing the +// client's request for the GetRoleCredentials operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetRoleCredentials for more information on using the GetRoleCredentials +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetRoleCredentialsRequest method. +// req, resp := client.GetRoleCredentialsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials +func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *request.Request, output *GetRoleCredentialsOutput) { + op := &request.Operation{ + Name: opGetRoleCredentials, + HTTPMethod: "GET", + HTTPPath: "/federation/credentials", + } + + if input == nil { + input = &GetRoleCredentialsInput{} + } + + output = &GetRoleCredentialsOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// GetRoleCredentials API operation for AWS Single Sign-On. +// +// Returns the STS short-term credentials for a given role name that is assigned +// to the user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation GetRoleCredentials for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// - ResourceNotFoundException +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials +func (c *SSO) GetRoleCredentials(input *GetRoleCredentialsInput) (*GetRoleCredentialsOutput, error) { + req, out := c.GetRoleCredentialsRequest(input) + return out, req.Send() +} + +// GetRoleCredentialsWithContext is the same as GetRoleCredentials with the addition of +// the ability to pass a context and additional request options. +// +// See GetRoleCredentials for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) GetRoleCredentialsWithContext(ctx aws.Context, input *GetRoleCredentialsInput, opts ...request.Option) (*GetRoleCredentialsOutput, error) { + req, out := c.GetRoleCredentialsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListAccountRoles = "ListAccountRoles" + +// ListAccountRolesRequest generates a "aws/request.Request" representing the +// client's request for the ListAccountRoles operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAccountRoles for more information on using the ListAccountRoles +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListAccountRolesRequest method. +// req, resp := client.ListAccountRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles +func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *request.Request, output *ListAccountRolesOutput) { + op := &request.Operation{ + Name: opListAccountRoles, + HTTPMethod: "GET", + HTTPPath: "/assignment/roles", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAccountRolesInput{} + } + + output = &ListAccountRolesOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// ListAccountRoles API operation for AWS Single Sign-On. +// +// Lists all roles that are assigned to the user for a given AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation ListAccountRoles for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// - ResourceNotFoundException +// The specified resource doesn't exist. 
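+//
+// An illustrative sketch (svc, token, and the account ID are placeholders):
+//
+//	out, err := svc.ListAccountRoles(&sso.ListAccountRolesInput{
+//		AccessToken: aws.String(token),
+//		AccountId:   aws.String("123456789012"),
+//	})
+//	if err != nil {
+//		if aerr, ok := err.(awserr.Error); ok {
+//			fmt.Println(aerr.Code(), aerr.Message())
+//		}
+//	}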
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles +func (c *SSO) ListAccountRoles(input *ListAccountRolesInput) (*ListAccountRolesOutput, error) { + req, out := c.ListAccountRolesRequest(input) + return out, req.Send() +} + +// ListAccountRolesWithContext is the same as ListAccountRoles with the addition of +// the ability to pass a context and additional request options. +// +// See ListAccountRoles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountRolesWithContext(ctx aws.Context, input *ListAccountRolesInput, opts ...request.Option) (*ListAccountRolesOutput, error) { + req, out := c.ListAccountRolesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAccountRolesPages iterates over the pages of a ListAccountRoles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAccountRoles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccountRoles operation. +// pageNum := 0 +// err := client.ListAccountRolesPages(params, +// func(page *sso.ListAccountRolesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *SSO) ListAccountRolesPages(input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool) error { + return c.ListAccountRolesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAccountRolesPagesWithContext same as ListAccountRolesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountRolesPagesWithContext(ctx aws.Context, input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAccountRolesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAccountRolesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAccountRolesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListAccounts = "ListAccounts" + +// ListAccountsRequest generates a "aws/request.Request" representing the +// client's request for the ListAccounts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAccounts for more information on using the ListAccounts +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListAccountsRequest method. +// req, resp := client.ListAccountsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts +func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Request, output *ListAccountsOutput) { + op := &request.Operation{ + Name: opListAccounts, + HTTPMethod: "GET", + HTTPPath: "/assignment/accounts", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAccountsInput{} + } + + output = &ListAccountsOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// ListAccounts API operation for AWS Single Sign-On. +// +// Lists all AWS accounts assigned to the user. These AWS accounts are assigned +// by the administrator of the account. For more information, see Assign User +// Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) +// in the IAM Identity Center User Guide. This operation returns a paginated +// response. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation ListAccounts for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// - ResourceNotFoundException +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts +func (c *SSO) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) { + req, out := c.ListAccountsRequest(input) + return out, req.Send() +} + +// ListAccountsWithContext is the same as ListAccounts with the addition of +// the ability to pass a context and additional request options. +// +// See ListAccounts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountsWithContext(ctx aws.Context, input *ListAccountsInput, opts ...request.Option) (*ListAccountsOutput, error) { + req, out := c.ListAccountsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAccountsPages iterates over the pages of a ListAccounts operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See ListAccounts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccounts operation. +// pageNum := 0 +// err := client.ListAccountsPages(params, +// func(page *sso.ListAccountsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *SSO) ListAccountsPages(input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool) error { + return c.ListAccountsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAccountsPagesWithContext same as ListAccountsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountsPagesWithContext(ctx aws.Context, input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAccountsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAccountsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAccountsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opLogout = "Logout" + +// LogoutRequest generates a "aws/request.Request" representing the +// client's request for the Logout operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See Logout for more information on using the Logout +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the LogoutRequest method. +// req, resp := client.LogoutRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout +func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *LogoutOutput) { + op := &request.Operation{ + Name: opLogout, + HTTPMethod: "POST", + HTTPPath: "/logout", + } + + if input == nil { + input = &LogoutInput{} + } + + output = &LogoutOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// Logout API operation for AWS Single Sign-On. +// +// Removes the locally stored SSO tokens from the client-side cache and sends +// an API call to the IAM Identity Center service to invalidate the corresponding +// server-side IAM Identity Center sign in session. 
+// +// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM +// Identity Center sign in session is used to obtain an IAM session, as specified +// in the corresponding IAM Identity Center permission set. More specifically, +// IAM Identity Center assumes an IAM role in the target account on behalf of +// the user, and the corresponding temporary AWS credentials are returned to +// the client. +// +// After user logout, any existing IAM role sessions that were created by using +// IAM Identity Center permission sets continue based on the duration configured +// in the permission set. For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) +// in the IAM Identity Center User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation Logout for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout +func (c *SSO) Logout(input *LogoutInput) (*LogoutOutput, error) { + req, out := c.LogoutRequest(input) + return out, req.Send() +} + +// LogoutWithContext is the same as Logout with the addition of +// the ability to pass a context and additional request options. +// +// See Logout for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) LogoutWithContext(ctx aws.Context, input *LogoutInput, opts ...request.Option) (*LogoutOutput, error) { + req, out := c.LogoutRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Provides information about your AWS account. +type AccountInfo struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS account that is assigned to the user. + AccountId *string `locationName:"accountId" type:"string"` + + // The display name of the AWS account that is assigned to the user. + AccountName *string `locationName:"accountName" type:"string"` + + // The email address of the AWS account that is assigned to the user. + EmailAddress *string `locationName:"emailAddress" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccountInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccountInfo) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *AccountInfo) SetAccountId(v string) *AccountInfo { + s.AccountId = &v + return s +} + +// SetAccountName sets the AccountName field's value. +func (s *AccountInfo) SetAccountName(v string) *AccountInfo { + s.AccountName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *AccountInfo) SetEmailAddress(v string) *AccountInfo { + s.EmailAddress = &v + return s +} + +type GetRoleCredentialsInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetRoleCredentialsInput's + // String and GoString methods. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // The identifier for the AWS account that is assigned to the user. + // + // AccountId is a required field + AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` + + // The friendly name of the role that is assigned to the user. + // + // RoleName is a required field + RoleName *string `location:"querystring" locationName:"role_name" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetRoleCredentialsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetRoleCredentialsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRoleCredentialsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRoleCredentialsInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *GetRoleCredentialsInput) SetAccessToken(v string) *GetRoleCredentialsInput { + s.AccessToken = &v + return s +} + +// SetAccountId sets the AccountId field's value. +func (s *GetRoleCredentialsInput) SetAccountId(v string) *GetRoleCredentialsInput { + s.AccountId = &v + return s +} + +// SetRoleName sets the RoleName field's value. 
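+//
+// As a small illustrative sketch, the generated setters return the receiver,
+// so an input can be built by chaining (token and the values shown are
+// placeholders):
+//
+//	input := (&sso.GetRoleCredentialsInput{}).
+//	    SetAccessToken(token).
+//	    SetAccountId("123456789012").
+//	    SetRoleName("ReadOnly")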
+func (s *GetRoleCredentialsInput) SetRoleName(v string) *GetRoleCredentialsInput { + s.RoleName = &v + return s +} + +type GetRoleCredentialsOutput struct { + _ struct{} `type:"structure"` + + // The credentials for the role that is assigned to the user. + RoleCredentials *RoleCredentials `locationName:"roleCredentials" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetRoleCredentialsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetRoleCredentialsOutput) GoString() string { + return s.String() +} + +// SetRoleCredentials sets the RoleCredentials field's value. +func (s *GetRoleCredentialsOutput) SetRoleCredentials(v *RoleCredentials) *GetRoleCredentialsOutput { + s.RoleCredentials = v + return s +} + +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +type InvalidRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { + return &InvalidRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestException) Code() string { + return "InvalidRequestException" +} + +// Message returns the exception's message. +func (s *InvalidRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestException) OrigErr() error { + return nil +} + +func (s *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListAccountRolesInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The token issued by the CreateToken API call. 
For more information, see CreateToken
+ // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
+ // in the IAM Identity Center OIDC API Reference Guide.
+ //
+ // AccessToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by ListAccountRolesInput's
+ // String and GoString methods.
+ //
+ // AccessToken is a required field
+ AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
+
+ // The identifier for the AWS account that is assigned to the user.
+ //
+ // AccountId is a required field
+ AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"`
+
+ // The number of items that clients can request per page.
+ MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"`
+
+ // The page token from the previous response output when you request subsequent
+ // pages.
+ NextToken *string `location:"querystring" locationName:"next_token" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListAccountRolesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListAccountRolesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListAccountRolesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListAccountRolesInput"}
+ if s.AccessToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccessToken"))
+ }
+ if s.AccountId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccountId"))
+ }
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessToken sets the AccessToken field's value.
+func (s *ListAccountRolesInput) SetAccessToken(v string) *ListAccountRolesInput {
+ s.AccessToken = &v
+ return s
+}
+
+// SetAccountId sets the AccountId field's value.
+func (s *ListAccountRolesInput) SetAccountId(v string) *ListAccountRolesInput {
+ s.AccountId = &v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListAccountRolesInput) SetMaxResults(v int64) *ListAccountRolesInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListAccountRolesInput) SetNextToken(v string) *ListAccountRolesInput {
+ s.NextToken = &v
+ return s
+}
+
+type ListAccountRolesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The page token that the client uses to retrieve the next page of results.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // A paginated response with the list of roles and the next token if more results
+ // are available.
+ RoleList []*RoleInfo `locationName:"roleList" type:"list"`
+}
+
+// String returns the string representation.
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountRolesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountRolesOutput) SetNextToken(v string) *ListAccountRolesOutput { + s.NextToken = &v + return s +} + +// SetRoleList sets the RoleList field's value. +func (s *ListAccountRolesOutput) SetRoleList(v []*RoleInfo) *ListAccountRolesOutput { + s.RoleList = v + return s +} + +type ListAccountsInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ListAccountsInput's + // String and GoString methods. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // This is the number of items clients can request per page. + MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"` + + // (Optional) When requesting subsequent pages, this is the page token from + // the previous response output. + NextToken *string `location:"querystring" locationName:"next_token" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAccountsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAccountsInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *ListAccountsInput) SetAccessToken(v string) *ListAccountsInput { + s.AccessToken = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAccountsInput) SetMaxResults(v int64) *ListAccountsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
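+//
+// As a rough sketch, NextToken can also drive manual pagination when the
+// Pages helpers are not used (svc and token are placeholders; error handling
+// is abbreviated):
+//
+//	input := &sso.ListAccountsInput{AccessToken: aws.String(token)}
+//	for {
+//	    out, err := svc.ListAccounts(input)
+//	    if err != nil {
+//	        break
+//	    }
+//	    // consume out.AccountList here
+//	    if out.NextToken == nil {
+//	        break
+//	    }
+//	    input.SetNextToken(*out.NextToken)
+//	}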
+func (s *ListAccountsInput) SetNextToken(v string) *ListAccountsInput {
+ s.NextToken = &v
+ return s
+}
+
+type ListAccountsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A paginated response with the list of account information and the next token
+ // if more results are available.
+ AccountList []*AccountInfo `locationName:"accountList" type:"list"`
+
+ // The page token that the client uses to retrieve the next page of results.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListAccountsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListAccountsOutput) GoString() string {
+ return s.String()
+}
+
+// SetAccountList sets the AccountList field's value.
+func (s *ListAccountsOutput) SetAccountList(v []*AccountInfo) *ListAccountsOutput {
+ s.AccountList = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListAccountsOutput) SetNextToken(v string) *ListAccountsOutput {
+ s.NextToken = &v
+ return s
+}
+
+type LogoutInput struct {
+ _ struct{} `type:"structure" nopayload:"true"`
+
+ // The token issued by the CreateToken API call. For more information, see CreateToken
+ // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
+ // in the IAM Identity Center OIDC API Reference Guide.
+ //
+ // AccessToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by LogoutInput's
+ // String and GoString methods.
+ //
+ // AccessToken is a required field
+ AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LogoutInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LogoutInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LogoutInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LogoutInput"}
+ if s.AccessToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccessToken"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessToken sets the AccessToken field's value.
+func (s *LogoutInput) SetAccessToken(v string) *LogoutInput {
+ s.AccessToken = &v
+ return s
+}
+
+type LogoutOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogoutOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogoutOutput) GoString() string { + return s.String() +} + +// The specified resource doesn't exist. +type ResourceNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResourceNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResourceNotFoundException) GoString() string { + return s.String() +} + +func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { + return &ResourceNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceNotFoundException) Code() string { + return "ResourceNotFoundException" +} + +// Message returns the exception's message. +func (s *ResourceNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceNotFoundException) OrigErr() error { + return nil +} + +func (s *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Provides information about the role credentials that are assigned to the +// user. +type RoleCredentials struct { + _ struct{} `type:"structure"` + + // The identifier used for the temporary security credentials. For more information, + // see Using Temporary Security Credentials to Request Access to AWS Resources + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + AccessKeyId *string `locationName:"accessKeyId" type:"string"` + + // The date on which temporary security credentials expire. + Expiration *int64 `locationName:"expiration" type:"long"` + + // The key that is used to sign the request. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. 
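+ //
+ // As an illustrative sketch, the secret key is typically combined with the
+ // access key ID and session token to build static SDK credentials (rc is a
+ // placeholder for a received RoleCredentials value):
+ //
+ //	creds := credentials.NewStaticCredentials(
+ //	    aws.StringValue(rc.AccessKeyId),
+ //	    aws.StringValue(rc.SecretAccessKey),
+ //	    aws.StringValue(rc.SessionToken))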
+ // + // SecretAccessKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RoleCredentials's + // String and GoString methods. + SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"` + + // The token used for temporary credentials. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + // + // SessionToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RoleCredentials's + // String and GoString methods. + SessionToken *string `locationName:"sessionToken" type:"string" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoleCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoleCredentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *RoleCredentials) SetAccessKeyId(v string) *RoleCredentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *RoleCredentials) SetExpiration(v int64) *RoleCredentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *RoleCredentials) SetSecretAccessKey(v string) *RoleCredentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *RoleCredentials) SetSessionToken(v string) *RoleCredentials { + s.SessionToken = &v + return s +} + +// Provides information about the role that is assigned to the user. +type RoleInfo struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS account assigned to the user. + AccountId *string `locationName:"accountId" type:"string"` + + // The friendly name of the role that is assigned to the user. + RoleName *string `locationName:"roleName" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoleInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoleInfo) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *RoleInfo) SetAccountId(v string) *RoleInfo { + s.AccountId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *RoleInfo) SetRoleName(v string) *RoleInfo { + s.RoleName = &v + return s +} + +// Indicates that the request is being made too frequently and is more than +// what the server can handle. 
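+//
+// For illustration, callers can detect this throttling error with the awserr
+// type assertion pattern described in the operation docs (a sketch only; the
+// backoff and retry policy is up to the caller):
+//
+//	if _, err := svc.ListAccounts(input); err != nil {
+//	    if aerr, ok := err.(awserr.Error); ok &&
+//	        aerr.Code() == sso.ErrCodeTooManyRequestsException {
+//	        // back off before retrying
+//	    }
+//	}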
+type TooManyRequestsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TooManyRequestsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TooManyRequestsException) GoString() string { + return s.String() +} + +func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { + return &TooManyRequestsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TooManyRequestsException) Code() string { + return "TooManyRequestsException" +} + +// Message returns the exception's message. +func (s *TooManyRequestsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TooManyRequestsException) OrigErr() error { + return nil +} + +func (s *TooManyRequestsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +type UnauthorizedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnauthorizedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnauthorizedException) GoString() string { + return s.String() +} + +func newErrorUnauthorizedException(v protocol.ResponseMetadata) error { + return &UnauthorizedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnauthorizedException) Code() string { + return "UnauthorizedException" +} + +// Message returns the exception's message. +func (s *UnauthorizedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *UnauthorizedException) OrigErr() error {
+ return nil
+}
+
+func (s *UnauthorizedException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *UnauthorizedException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *UnauthorizedException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
new file mode 100644
index 000000000..15e61a322
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
@@ -0,0 +1,45 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package sso provides the client and types for making API
+// requests to AWS Single Sign-On.
+//
+// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web
+// service that makes it easy for you to assign user access to IAM Identity
+// Center resources such as the AWS access portal. Users can get AWS account
+// applications and roles assigned to them and get federated into the application.
+//
+// Although AWS Single Sign-On was renamed, the sso and identitystore API namespaces
+// will continue to retain their original name for backward compatibility purposes.
+// For more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed).
+//
+// This reference guide describes the IAM Identity Center Portal operations
+// that you can call programmatically and includes detailed information on data
+// types and errors.
+//
+// AWS provides SDKs that consist of libraries and sample code for various programming
+// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs
+// provide a convenient way to create programmatic access to IAM Identity Center
+// and other AWS services. For more information about the AWS SDKs, including
+// how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service.
+//
+// See sso package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/
+//
+// # Using the Client
+//
+// To contact AWS Single Sign-On with the SDK, use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Single Sign-On client SSO for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/#New
+package sso
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
new file mode 100644
index 000000000..77a6792e3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
@@ -0,0 +1,44 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sso
+
+import (
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+const (
+
+ // ErrCodeInvalidRequestException for service response error code
+ // "InvalidRequestException".
+ //
+ // Indicates that a problem occurred with the input to the request. For example,
+ // a required parameter might be missing or out of range.
+ ErrCodeInvalidRequestException = "InvalidRequestException"
+
+ // ErrCodeResourceNotFoundException for service response error code
+ // "ResourceNotFoundException".
+ //
+ // The specified resource doesn't exist.
+ ErrCodeResourceNotFoundException = "ResourceNotFoundException"
+
+ // ErrCodeTooManyRequestsException for service response error code
+ // "TooManyRequestsException".
+ //
+ // Indicates that the request is being made too frequently and is more than
+ // what the server can handle.
+ ErrCodeTooManyRequestsException = "TooManyRequestsException"
+
+ // ErrCodeUnauthorizedException for service response error code
+ // "UnauthorizedException".
+ //
+ // Indicates that the request is not authorized. This can happen due to an invalid
+ // access token in the request.
+ ErrCodeUnauthorizedException = "UnauthorizedException"
+)
+
+var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
+ "InvalidRequestException": newErrorInvalidRequestException,
+ "ResourceNotFoundException": newErrorResourceNotFoundException,
+ "TooManyRequestsException": newErrorTooManyRequestsException,
+ "UnauthorizedException": newErrorUnauthorizedException,
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
new file mode 100644
index 000000000..7094cfe41
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
@@ -0,0 +1,106 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sso
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// SSO provides the API operation methods for making requests to
+// AWS Single Sign-On. See this package's package overview docs
+// for details on the service.
+//
+// SSO methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type SSO struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "SSO" // Name of service.
+ EndpointsID = "portal.sso" // ID to lookup a service endpoint with.
+ ServiceID = "SSO" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the SSO client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//
+// mySession := session.Must(session.NewSession())
+//
+// // Create a SSO client from just a session.
+// svc := sso.New(mySession)
+//
+// // Create a SSO client with additional configuration
+// svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO {
+ c := p.ClientConfig(EndpointsID, cfgs...)
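+
+ // If the signing name was not set explicitly (it was derived, or is
+ // empty), fall back to this service's default signing name.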
+ if c.SigningNameDerived || len(c.SigningName) == 0 {
+ c.SigningName = "awsssoportal"
+ }
+ return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSO {
+ svc := &SSO{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ PartitionID: partitionID,
+ Endpoint: endpoint,
+ APIVersion: "2019-06-10",
+ ResolvedRegion: resolvedRegion,
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(
+ protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
+ )
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a SSO operation and runs any
+// custom request initialization.
+func (c *SSO) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go
new file mode 100644
index 000000000..818cab7cd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go
@@ -0,0 +1,86 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package ssoiface provides an interface to enable mocking the AWS Single Sign-On service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package ssoiface
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/sso"
+)
+
+// SSOAPI provides an interface to enable mocking the
+// sso.SSO service client's API operations,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client's calls easier.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+// // myFunc uses an SDK service client to make a request to
+// // AWS Single Sign-On.
+// func myFunc(svc ssoiface.SSOAPI) bool {
+// // Make svc.GetRoleCredentials request
+// }
+//
+// func main() {
+// sess := session.New()
+// svc := sso.New(sess)
+//
+// myFunc(svc)
+// }
+//
+// In your _test.go file:
+//
+// // Define a mock struct to be used in your unit tests of myFunc.
+// type mockSSOClient struct {
+// ssoiface.SSOAPI
+// }
+// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
+// // mock response/functionality
+// }
+//
+// func TestMyFunc(t *testing.T) {
+// // Setup Test
+// mockSvc := &mockSSOClient{}
+//
+// myFunc(mockSvc)
+//
+// // Verify myFunc's functionality
+// }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks to satisfy the interfaces.
+type SSOAPI interface {
+ GetRoleCredentials(*sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error)
+ GetRoleCredentialsWithContext(aws.Context, *sso.GetRoleCredentialsInput, ...request.Option) (*sso.GetRoleCredentialsOutput, error)
+ GetRoleCredentialsRequest(*sso.GetRoleCredentialsInput) (*request.Request, *sso.GetRoleCredentialsOutput)
+
+ ListAccountRoles(*sso.ListAccountRolesInput) (*sso.ListAccountRolesOutput, error)
+ ListAccountRolesWithContext(aws.Context, *sso.ListAccountRolesInput, ...request.Option) (*sso.ListAccountRolesOutput, error)
+ ListAccountRolesRequest(*sso.ListAccountRolesInput) (*request.Request, *sso.ListAccountRolesOutput)
+
+ ListAccountRolesPages(*sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool) error
+ ListAccountRolesPagesWithContext(aws.Context, *sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool, ...request.Option) error
+
+ ListAccounts(*sso.ListAccountsInput) (*sso.ListAccountsOutput, error)
+ ListAccountsWithContext(aws.Context, *sso.ListAccountsInput, ...request.Option) (*sso.ListAccountsOutput, error)
+ ListAccountsRequest(*sso.ListAccountsInput) (*request.Request, *sso.ListAccountsOutput)
+
+ ListAccountsPages(*sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool) error
+ ListAccountsPagesWithContext(aws.Context, *sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool, ...request.Option) error
+
+ Logout(*sso.LogoutInput) (*sso.LogoutOutput, error)
+ LogoutWithContext(aws.Context, *sso.LogoutInput, ...request.Option) (*sso.LogoutOutput, error)
+ LogoutRequest(*sso.LogoutInput) (*request.Request, *sso.LogoutOutput)
+}
+
+var _ SSOAPI = (*sso.SSO)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go
new file mode 100644
index 000000000..827bd5194
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go
@@ -0,0 +1,2406 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+const opCreateToken = "CreateToken"
+
+// CreateTokenRequest generates a "aws/request.Request" representing the
+// client's request for the CreateToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateToken for more information on using the CreateToken
+// API call, and error handling.
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateTokenRequest method. +// req, resp := client.CreateTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken +func (c *SSOOIDC) CreateTokenRequest(input *CreateTokenInput) (req *request.Request, output *CreateTokenOutput) { + op := &request.Operation{ + Name: opCreateToken, + HTTPMethod: "POST", + HTTPPath: "/token", + } + + if input == nil { + input = &CreateTokenInput{} + } + + output = &CreateTokenOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// CreateToken API operation for AWS SSO OIDC. +// +// Creates and returns access and refresh tokens for clients that are authenticated +// using client secrets. The access token can be used to fetch short-term credentials +// for the assigned AWS accounts or to access application APIs using bearer +// authentication. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation CreateToken for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - InvalidGrantException +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. +// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. +// +// - AuthorizationPendingException +// Indicates that a request to authorize a client with an access user session +// token is pending. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - ExpiredTokenException +// Indicates that the token issued by the service is expired and is no longer +// valid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken +func (c *SSOOIDC) CreateToken(input *CreateTokenInput) (*CreateTokenOutput, error) { + req, out := c.CreateTokenRequest(input) + return out, req.Send() +} + +// CreateTokenWithContext is the same as CreateToken with the addition of +// the ability to pass a context and additional request options. 
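+//
+// For illustration, a caller could bound the request with a deadline (a
+// minimal sketch; svc and input construction are omitted):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	out, err := svc.CreateTokenWithContext(ctx, input)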
+// +// See CreateToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) CreateTokenWithContext(ctx aws.Context, input *CreateTokenInput, opts ...request.Option) (*CreateTokenOutput, error) { + req, out := c.CreateTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTokenWithIAM = "CreateTokenWithIAM" + +// CreateTokenWithIAMRequest generates a "aws/request.Request" representing the +// client's request for the CreateTokenWithIAM operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTokenWithIAM for more information on using the CreateTokenWithIAM +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateTokenWithIAMRequest method. +// req, resp := client.CreateTokenWithIAMRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM +func (c *SSOOIDC) CreateTokenWithIAMRequest(input *CreateTokenWithIAMInput) (req *request.Request, output *CreateTokenWithIAMOutput) { + op := &request.Operation{ + Name: opCreateTokenWithIAM, + HTTPMethod: "POST", + HTTPPath: "/token?aws_iam=t", + } + + if input == nil { + input = &CreateTokenWithIAMInput{} + } + + output = &CreateTokenWithIAMOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTokenWithIAM API operation for AWS SSO OIDC. +// +// Creates and returns access and refresh tokens for clients and applications +// that are authenticated using IAM entities. The access token can be used to +// fetch short-term credentials for the assigned Amazon Web Services accounts +// or to access application APIs using bearer authentication. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation CreateTokenWithIAM for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - InvalidGrantException +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. 
+// This can happen when a clientId is not issued for a public client. +// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. +// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. +// +// - AuthorizationPendingException +// Indicates that a request to authorize a client with an access user session +// token is pending. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - ExpiredTokenException +// Indicates that the token issued by the service is expired and is no longer +// valid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// - InvalidRequestRegionException +// Indicates that a token provided as input to the request was issued by and +// is only usable by calling IAM Identity Center endpoints in another region. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM +func (c *SSOOIDC) CreateTokenWithIAM(input *CreateTokenWithIAMInput) (*CreateTokenWithIAMOutput, error) { + req, out := c.CreateTokenWithIAMRequest(input) + return out, req.Send() +} + +// CreateTokenWithIAMWithContext is the same as CreateTokenWithIAM with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTokenWithIAM for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) CreateTokenWithIAMWithContext(ctx aws.Context, input *CreateTokenWithIAMInput, opts ...request.Option) (*CreateTokenWithIAMOutput, error) { + req, out := c.CreateTokenWithIAMRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterClient = "RegisterClient" + +// RegisterClientRequest generates a "aws/request.Request" representing the +// client's request for the RegisterClient operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterClient for more information on using the RegisterClient +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the RegisterClientRequest method. 
+// req, resp := client.RegisterClientRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient
+func (c *SSOOIDC) RegisterClientRequest(input *RegisterClientInput) (req *request.Request, output *RegisterClientOutput) {
+ op := &request.Operation{
+ Name: opRegisterClient,
+ HTTPMethod: "POST",
+ HTTPPath: "/client/register",
+ }
+
+ if input == nil {
+ input = &RegisterClientInput{}
+ }
+
+ output = &RegisterClientOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ return
+}
+
+// RegisterClient API operation for AWS SSO OIDC.
+//
+// Registers a client with IAM Identity Center. This allows clients to initiate
+// device authorization. The output should be persisted for reuse through many
+// authentication requests.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS SSO OIDC's
+// API operation RegisterClient for usage and error information.
+//
+// Returned Error Types:
+//
+// - InvalidRequestException
+// Indicates that something is wrong with the input to the request. For example,
+// a required parameter might be missing or out of range.
+//
+// - InvalidScopeException
+// Indicates that the scope provided in the request is invalid.
+//
+// - InvalidClientMetadataException
+// Indicates that the client information sent in the request during registration
+// is invalid.
+//
+// - InternalServerException
+// Indicates that an error from the service occurred while trying to process
+// a request.
+//
+// - InvalidRedirectUriException
+// Indicates that one or more redirect URIs in the request are not supported
+// for this operation.
+//
+// - UnsupportedGrantTypeException
+// Indicates that the grant type in the request is not supported by the service.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient
+func (c *SSOOIDC) RegisterClient(input *RegisterClientInput) (*RegisterClientOutput, error) {
+ req, out := c.RegisterClientRequest(input)
+ return out, req.Send()
+}
+
+// RegisterClientWithContext is the same as RegisterClient with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RegisterClient for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSOOIDC) RegisterClientWithContext(ctx aws.Context, input *RegisterClientInput, opts ...request.Option) (*RegisterClientOutput, error) {
+ req, out := c.RegisterClientRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opStartDeviceAuthorization = "StartDeviceAuthorization"
+
+// StartDeviceAuthorizationRequest generates a "aws/request.Request" representing the
+// client's request for the StartDeviceAuthorization operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error. +// +// See StartDeviceAuthorization for more information on using the StartDeviceAuthorization +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StartDeviceAuthorizationRequest method. +// req, resp := client.StartDeviceAuthorizationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization +func (c *SSOOIDC) StartDeviceAuthorizationRequest(input *StartDeviceAuthorizationInput) (req *request.Request, output *StartDeviceAuthorizationOutput) { + op := &request.Operation{ + Name: opStartDeviceAuthorization, + HTTPMethod: "POST", + HTTPPath: "/device_authorization", + } + + if input == nil { + input = &StartDeviceAuthorizationInput{} + } + + output = &StartDeviceAuthorizationOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// StartDeviceAuthorization API operation for AWS SSO OIDC. +// +// Initiates device authorization by requesting a pair of verification codes +// from the authorization service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation StartDeviceAuthorization for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization +func (c *SSOOIDC) StartDeviceAuthorization(input *StartDeviceAuthorizationInput) (*StartDeviceAuthorizationOutput, error) { + req, out := c.StartDeviceAuthorizationRequest(input) + return out, req.Send() +} + +// StartDeviceAuthorizationWithContext is the same as StartDeviceAuthorization with the addition of +// the ability to pass a context and additional request options. +// +// See StartDeviceAuthorization for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
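+//
+// For illustration, StartDeviceAuthorization typically follows RegisterClient
+// and precedes polling CreateToken with the device code (a rough sketch; the
+// names and values shown are placeholders and error handling is omitted):
+//
+//	reg, err := svc.RegisterClient(&ssooidc.RegisterClientInput{
+//	    ClientName: aws.String("my-tool"),
+//	    ClientType: aws.String("public"),
+//	})
+//	auth, err := svc.StartDeviceAuthorization(&ssooidc.StartDeviceAuthorizationInput{
+//	    ClientId:     reg.ClientId,
+//	    ClientSecret: reg.ClientSecret,
+//	    StartUrl:     aws.String("https://example.awsapps.com/start"),
+//	})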
+func (c *SSOOIDC) StartDeviceAuthorizationWithContext(ctx aws.Context, input *StartDeviceAuthorizationInput, opts ...request.Option) (*StartDeviceAuthorizationOutput, error) {
+ req, out := c.StartDeviceAuthorizationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// You do not have sufficient access to perform this action.
+type AccessDeniedException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be access_denied.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AccessDeniedException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AccessDeniedException) GoString() string {
+ return s.String()
+}
+
+func newErrorAccessDeniedException(v protocol.ResponseMetadata) error {
+ return &AccessDeniedException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *AccessDeniedException) Code() string {
+ return "AccessDeniedException"
+}
+
+// Message returns the exception's message.
+func (s *AccessDeniedException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *AccessDeniedException) OrigErr() error {
+ return nil
+}
+
+func (s *AccessDeniedException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *AccessDeniedException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *AccessDeniedException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that a request to authorize a client with an access user session
+// token is pending.
+type AuthorizationPendingException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be authorization_pending.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AuthorizationPendingException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AuthorizationPendingException) GoString() string {
+ return s.String()
+}
+
+func newErrorAuthorizationPendingException(v protocol.ResponseMetadata) error {
+ return &AuthorizationPendingException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *AuthorizationPendingException) Code() string {
+ return "AuthorizationPendingException"
+}
+
+// Message returns the exception's message.
+func (s *AuthorizationPendingException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *AuthorizationPendingException) OrigErr() error {
+ return nil
+}
+
+func (s *AuthorizationPendingException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *AuthorizationPendingException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *AuthorizationPendingException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+type CreateTokenInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique identifier string for the client or application. This value comes
+ // from the result of the RegisterClient API.
+ //
+ // ClientId is a required field
+ ClientId *string `locationName:"clientId" type:"string" required:"true"`
+
+ // A secret string generated for the client. This value should come from the
+ // persisted result of the RegisterClient API.
+ //
+ // ClientSecret is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenInput's
+ // String and GoString methods.
+ //
+ // ClientSecret is a required field
+ ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"`
+
+ // Used only when calling this API for the Authorization Code grant type. The
+ // short-term code is used to identify this authorization request. This grant
+ // type is currently unsupported for the CreateToken API.
+ Code *string `locationName:"code" type:"string"`
+
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value is generated by the client and presented to validate the original code
+ // challenge value the client passed at authorization time.
+ //
+ // CodeVerifier is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenInput's
+ // String and GoString methods.
+ CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"`
+
+ // Used only when calling this API for the Device Code grant type. This short-term
+ // code is used to identify this authorization request. This comes from the
+ // result of the StartDeviceAuthorization API.
+ DeviceCode *string `locationName:"deviceCode" type:"string"`
+
+ // Supports the following OAuth grant types: Device Code and Refresh Token.
+ // Specify either of the following values, depending on the grant type that + // you want: + // + // * Device Code - urn:ietf:params:oauth:grant-type:device_code + // + // * Refresh Token - refresh_token + // + // For information about how to obtain the device code, see the StartDeviceAuthorization + // topic. + // + // GrantType is a required field + GrantType *string `locationName:"grantType" type:"string" required:"true"` + + // Used only when calling this API for the Authorization Code grant type. This + // value specifies the location of the client or application that has registered + // to receive the authorization code. + RedirectUri *string `locationName:"redirectUri" type:"string"` + + // Used only when calling this API for the Refresh Token grant type. This token + // is used to refresh short-term tokens, such as the access token, that might + // expire. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenInput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // The list of scopes for which authorization is requested. The access token + // that is issued is limited to the scopes that are granted. If this value is + // not specified, IAM Identity Center authorizes all scopes that are configured + // for the client during the call to RegisterClient. + Scope []*string `locationName:"scope" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTokenInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientSecret == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSecret")) + } + if s.GrantType == nil { + invalidParams.Add(request.NewErrParamRequired("GrantType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *CreateTokenInput) SetClientId(v string) *CreateTokenInput { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *CreateTokenInput) SetClientSecret(v string) *CreateTokenInput { + s.ClientSecret = &v + return s +} + +// SetCode sets the Code field's value. +func (s *CreateTokenInput) SetCode(v string) *CreateTokenInput { + s.Code = &v + return s +} + +// SetCodeVerifier sets the CodeVerifier field's value. 
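+//
+// Like the other Set* methods, it returns the receiver, so an input can be
+// assembled by chaining. A sketch for the Device Code grant type (the client
+// credentials and device code below are placeholders carried over from earlier
+// RegisterClient and StartDeviceAuthorization results):
+//
+// input := (&CreateTokenInput{}).
+// SetClientId("registered-client-id").
+// SetClientSecret("registered-client-secret").
+// SetGrantType("urn:ietf:params:oauth:grant-type:device_code").
+// SetDeviceCode("device-code-from-StartDeviceAuthorization")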
+func (s *CreateTokenInput) SetCodeVerifier(v string) *CreateTokenInput { + s.CodeVerifier = &v + return s +} + +// SetDeviceCode sets the DeviceCode field's value. +func (s *CreateTokenInput) SetDeviceCode(v string) *CreateTokenInput { + s.DeviceCode = &v + return s +} + +// SetGrantType sets the GrantType field's value. +func (s *CreateTokenInput) SetGrantType(v string) *CreateTokenInput { + s.GrantType = &v + return s +} + +// SetRedirectUri sets the RedirectUri field's value. +func (s *CreateTokenInput) SetRedirectUri(v string) *CreateTokenInput { + s.RedirectUri = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenInput) SetRefreshToken(v string) *CreateTokenInput { + s.RefreshToken = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput { + s.Scope = v + return s +} + +type CreateTokenOutput struct { + _ struct{} `type:"structure"` + + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. + AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"` + + // Indicates the time in seconds when an access token will expire. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // The idToken is not implemented or supported. For more information about the + // features and limitations of the current IAM Identity Center OIDC implementation, + // see Considerations for Using this Guide in the IAM Identity Center OIDC API + // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // A JSON Web Token (JWT) that identifies who is associated with the issued + // access token. + // + // IdToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. + IdToken *string `locationName:"idToken" type:"string" sensitive:"true"` + + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // Used to notify the client that the returned token is an access token. The + // supported token type is Bearer. + TokenType *string `locationName:"tokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenOutput) GoString() string { + return s.String() +} + +// SetAccessToken sets the AccessToken field's value. +func (s *CreateTokenOutput) SetAccessToken(v string) *CreateTokenOutput { + s.AccessToken = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *CreateTokenOutput) SetExpiresIn(v int64) *CreateTokenOutput { + s.ExpiresIn = &v + return s +} + +// SetIdToken sets the IdToken field's value. +func (s *CreateTokenOutput) SetIdToken(v string) *CreateTokenOutput { + s.IdToken = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenOutput) SetRefreshToken(v string) *CreateTokenOutput { + s.RefreshToken = &v + return s +} + +// SetTokenType sets the TokenType field's value. +func (s *CreateTokenOutput) SetTokenType(v string) *CreateTokenOutput { + s.TokenType = &v + return s +} + +type CreateTokenWithIAMInput struct { + _ struct{} `type:"structure"` + + // Used only when calling this API for the JWT Bearer grant type. This value + // specifies the JSON Web Token (JWT) issued by a trusted token issuer. To authorize + // a trusted token issuer, configure the JWT Bearer GrantOptions for the application. + // + // Assertion is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + Assertion *string `locationName:"assertion" type:"string" sensitive:"true"` + + // The unique identifier string for the client or application. This value is + // an application ARN that has OAuth grants configured. + // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // Used only when calling this API for the Authorization Code grant type. This + // short-term code is used to identify this authorization request. The code + // is obtained through a redirect from IAM Identity Center to a redirect URI + // persisted in the Authorization Code GrantOptions for the application. + Code *string `locationName:"code" type:"string"` + + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + // + // CodeVerifier is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"` + + // Supports the following OAuth grant types: Authorization Code, Refresh Token, + // JWT Bearer, and Token Exchange. Specify one of the following values, depending + // on the grant type that you want: + // + // * Authorization Code - authorization_code + // + // * Refresh Token - refresh_token + // + // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer + // + // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange + // + // GrantType is a required field + GrantType *string `locationName:"grantType" type:"string" required:"true"` + + // Used only when calling this API for the Authorization Code grant type. This + // value specifies the location of the client or application that has registered + // to receive the authorization code. 
+ RedirectUri *string `locationName:"redirectUri" type:"string"` + + // Used only when calling this API for the Refresh Token grant type. This token + // is used to refresh short-term tokens, such as the access token, that might + // expire. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the type of token that the requester can receive. The following + // values are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + RequestedTokenType *string `locationName:"requestedTokenType" type:"string"` + + // The list of scopes for which authorization is requested. The access token + // that is issued is limited to the scopes that are granted. If the value is + // not specified, IAM Identity Center authorizes all scopes configured for the + // application, including the following default scopes: openid, aws, sts:identity_context. + Scope []*string `locationName:"scope" type:"list"` + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the subject of the exchange. The value of the subject token must + // be an access token issued by IAM Identity Center to a different client or + // application. The access token must have authorized scopes that indicate the + // requested application as a target audience. + // + // SubjectToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + SubjectToken *string `locationName:"subjectToken" type:"string" sensitive:"true"` + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the type of token that is passed as the subject of the exchange. + // The following value is supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + SubjectTokenType *string `locationName:"subjectTokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
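+//
+// A minimal sketch (the ARN is a placeholder): because GrantType is required,
+// Validate returns a non-nil error here before any request is sent.
+//
+// in := &CreateTokenWithIAMInput{ClientId: aws.String("arn:aws:sso::123456789012:application/example")}
+// if err := in.Validate(); err != nil {
+// fmt.Println(err) // reports the missing required GrantType field
+// }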
+func (s *CreateTokenWithIAMInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTokenWithIAMInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.GrantType == nil { + invalidParams.Add(request.NewErrParamRequired("GrantType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssertion sets the Assertion field's value. +func (s *CreateTokenWithIAMInput) SetAssertion(v string) *CreateTokenWithIAMInput { + s.Assertion = &v + return s +} + +// SetClientId sets the ClientId field's value. +func (s *CreateTokenWithIAMInput) SetClientId(v string) *CreateTokenWithIAMInput { + s.ClientId = &v + return s +} + +// SetCode sets the Code field's value. +func (s *CreateTokenWithIAMInput) SetCode(v string) *CreateTokenWithIAMInput { + s.Code = &v + return s +} + +// SetCodeVerifier sets the CodeVerifier field's value. +func (s *CreateTokenWithIAMInput) SetCodeVerifier(v string) *CreateTokenWithIAMInput { + s.CodeVerifier = &v + return s +} + +// SetGrantType sets the GrantType field's value. +func (s *CreateTokenWithIAMInput) SetGrantType(v string) *CreateTokenWithIAMInput { + s.GrantType = &v + return s +} + +// SetRedirectUri sets the RedirectUri field's value. +func (s *CreateTokenWithIAMInput) SetRedirectUri(v string) *CreateTokenWithIAMInput { + s.RedirectUri = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenWithIAMInput) SetRefreshToken(v string) *CreateTokenWithIAMInput { + s.RefreshToken = &v + return s +} + +// SetRequestedTokenType sets the RequestedTokenType field's value. +func (s *CreateTokenWithIAMInput) SetRequestedTokenType(v string) *CreateTokenWithIAMInput { + s.RequestedTokenType = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenWithIAMInput) SetScope(v []*string) *CreateTokenWithIAMInput { + s.Scope = v + return s +} + +// SetSubjectToken sets the SubjectToken field's value. +func (s *CreateTokenWithIAMInput) SetSubjectToken(v string) *CreateTokenWithIAMInput { + s.SubjectToken = &v + return s +} + +// SetSubjectTokenType sets the SubjectTokenType field's value. +func (s *CreateTokenWithIAMInput) SetSubjectTokenType(v string) *CreateTokenWithIAMInput { + s.SubjectTokenType = &v + return s +} + +type CreateTokenWithIAMOutput struct { + _ struct{} `type:"structure"` + + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. + AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"` + + // Indicates the time in seconds when an access token will expire. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // A JSON Web Token (JWT) that identifies the user associated with the issued + // access token. + // + // IdToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. + IdToken *string `locationName:"idToken" type:"string" sensitive:"true"` + + // Indicates the type of tokens that are issued by IAM Identity Center. 
The + // following values are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + IssuedTokenType *string `locationName:"issuedTokenType" type:"string"` + + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // The list of scopes for which authorization is granted. The access token that + // is issued is limited to the scopes that are granted. + Scope []*string `locationName:"scope" type:"list"` + + // Used to notify the requester that the returned token is an access token. + // The supported token type is Bearer. + TokenType *string `locationName:"tokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMOutput) GoString() string { + return s.String() +} + +// SetAccessToken sets the AccessToken field's value. +func (s *CreateTokenWithIAMOutput) SetAccessToken(v string) *CreateTokenWithIAMOutput { + s.AccessToken = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *CreateTokenWithIAMOutput) SetExpiresIn(v int64) *CreateTokenWithIAMOutput { + s.ExpiresIn = &v + return s +} + +// SetIdToken sets the IdToken field's value. +func (s *CreateTokenWithIAMOutput) SetIdToken(v string) *CreateTokenWithIAMOutput { + s.IdToken = &v + return s +} + +// SetIssuedTokenType sets the IssuedTokenType field's value. +func (s *CreateTokenWithIAMOutput) SetIssuedTokenType(v string) *CreateTokenWithIAMOutput { + s.IssuedTokenType = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenWithIAMOutput) SetRefreshToken(v string) *CreateTokenWithIAMOutput { + s.RefreshToken = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenWithIAMOutput) SetScope(v []*string) *CreateTokenWithIAMOutput { + s.Scope = v + return s +} + +// SetTokenType sets the TokenType field's value. +func (s *CreateTokenWithIAMOutput) SetTokenType(v string) *CreateTokenWithIAMOutput { + s.TokenType = &v + return s +} + +// Indicates that the token issued by the service is expired and is no longer +// valid. +type ExpiredTokenException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be expired_token. 
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExpiredTokenException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExpiredTokenException) GoString() string {
+ return s.String()
+}
+
+func newErrorExpiredTokenException(v protocol.ResponseMetadata) error {
+ return &ExpiredTokenException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *ExpiredTokenException) Code() string {
+ return "ExpiredTokenException"
+}
+
+// Message returns the exception's message.
+func (s *ExpiredTokenException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ExpiredTokenException) OrigErr() error {
+ return nil
+}
+
+func (s *ExpiredTokenException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *ExpiredTokenException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *ExpiredTokenException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that an error from the service occurred while trying to process
+// a request.
+type InternalServerException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be server_error.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InternalServerException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InternalServerException) GoString() string {
+ return s.String()
+}
+
+func newErrorInternalServerException(v protocol.ResponseMetadata) error {
+ return &InternalServerException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
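+//
+// A sketch of checking the code at a call site, following the runtime
+// type-assertion pattern described in the operation docs above (err is assumed
+// to come from a CreateToken or similar call):
+//
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "InternalServerException" {
+// // transient service-side failure; consider retrying with backoff
+// }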
+func (s *InternalServerException) Code() string {
+ return "InternalServerException"
+}
+
+// Message returns the exception's message.
+func (s *InternalServerException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InternalServerException) OrigErr() error {
+ return nil
+}
+
+func (s *InternalServerException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InternalServerException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *InternalServerException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that the clientId or clientSecret in the request is invalid. For
+// example, this can occur when a client sends an incorrect clientId or an expired
+// clientSecret.
+type InvalidClientException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be invalid_client.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidClientException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidClientException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidClientException(v protocol.ResponseMetadata) error {
+ return &InvalidClientException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidClientException) Code() string {
+ return "InvalidClientException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidClientException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidClientException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidClientException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidClientException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *InvalidClientException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that the client information sent in the request during registration
+// is invalid.
+type InvalidClientMetadataException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be invalid_client_metadata.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidClientMetadataException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidClientMetadataException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidClientMetadataException(v protocol.ResponseMetadata) error {
+ return &InvalidClientMetadataException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidClientMetadataException) Code() string {
+ return "InvalidClientMetadataException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidClientMetadataException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidClientMetadataException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidClientMetadataException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidClientMetadataException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *InvalidClientMetadataException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that a request contains an invalid grant. This can occur if a client
+// makes a CreateToken request with an invalid grant type.
+type InvalidGrantException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be invalid_grant.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidGrantException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidGrantException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidGrantException(v protocol.ResponseMetadata) error {
+ return &InvalidGrantException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidGrantException) Code() string {
+ return "InvalidGrantException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidGrantException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidGrantException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidGrantException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidGrantException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *InvalidGrantException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that one or more redirect URIs in the request are not supported
+// for this operation.
+type InvalidRedirectUriException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be invalid_redirect_uri.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRedirectUriException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRedirectUriException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidRedirectUriException(v protocol.ResponseMetadata) error {
+ return &InvalidRedirectUriException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidRedirectUriException) Code() string {
+ return "InvalidRedirectUriException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidRedirectUriException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidRedirectUriException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidRedirectUriException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidRedirectUriException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *InvalidRedirectUriException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that something is wrong with the input to the request. For example,
+// a required parameter might be missing or out of range.
+type InvalidRequestException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be invalid_request.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRequestException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRequestException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidRequestException(v protocol.ResponseMetadata) error {
+ return &InvalidRequestException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidRequestException) Code() string {
+ return "InvalidRequestException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidRequestException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidRequestException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidRequestException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidRequestException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *InvalidRequestException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that a token provided as input to the request was issued by and
+// is only usable by calling IAM Identity Center endpoints in another region.
+type InvalidRequestRegionException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Indicates the IAM Identity Center endpoint which the requester may call with
+ // this token.
+ Endpoint *string `locationName:"endpoint" type:"string"`
+
+ // Single error code. For this exception the value will be invalid_request.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+
+ // Indicates the region which the requester may call with this token.
+ Region *string `locationName:"region" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRequestRegionException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRequestRegionException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidRequestRegionException(v protocol.ResponseMetadata) error {
+ return &InvalidRequestRegionException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidRequestRegionException) Code() string {
+ return "InvalidRequestRegionException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidRequestRegionException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidRequestRegionException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidRequestRegionException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidRequestRegionException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *InvalidRequestRegionException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that the scope provided in the request is invalid.
+type InvalidScopeException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be invalid_scope.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidScopeException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidScopeException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidScopeException(v protocol.ResponseMetadata) error {
+ return &InvalidScopeException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidScopeException) Code() string {
+ return "InvalidScopeException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidScopeException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
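+//
+// These generated exception types implement the error interface directly, so a
+// call site can also match the concrete type rather than the code string; a
+// sketch (err is assumed to come from a CreateToken call):
+//
+// if scopeErr, ok := err.(*InvalidScopeException); ok {
+// fmt.Println(scopeErr.Message())
+// }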
+func (s *InvalidScopeException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidScopeException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidScopeException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *InvalidScopeException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+type RegisterClientInput struct {
+ _ struct{} `type:"structure"`
+
+ // The friendly name of the client.
+ //
+ // ClientName is a required field
+ ClientName *string `locationName:"clientName" type:"string" required:"true"`
+
+ // The type of client. The service supports only public as a client type. Anything
+ // other than public will be rejected by the service.
+ //
+ // ClientType is a required field
+ ClientType *string `locationName:"clientType" type:"string" required:"true"`
+
+ // This IAM Identity Center application ARN is used to define administrator-managed
+ // configuration for public client access to resources. At authorization, the
+ // scopes, grants, and redirect URI available to this client will be restricted
+ // by this application resource.
+ EntitledApplicationArn *string `locationName:"entitledApplicationArn" type:"string"`
+
+ // The list of OAuth 2.0 grant types that are defined by the client. This list
+ // is used to restrict the token granting flows available to the client.
+ GrantTypes []*string `locationName:"grantTypes" type:"list"`
+
+ // The IAM Identity Center Issuer URL associated with an instance of IAM Identity
+ // Center. This value is needed for user access to resources through the client.
+ IssuerUrl *string `locationName:"issuerUrl" type:"string"`
+
+ // The list of redirect URIs that are defined by the client. At completion of
+ // authorization, this list is used to restrict what locations the user agent
+ // can be redirected back to.
+ RedirectUris []*string `locationName:"redirectUris" type:"list"`
+
+ // The list of scopes that are defined by the client. Upon authorization, this
+ // list is used to restrict permissions when granting an access token.
+ Scopes []*string `locationName:"scopes" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RegisterClientInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RegisterClientInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RegisterClientInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RegisterClientInput"}
+ if s.ClientName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClientName"))
+ }
+ if s.ClientType == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClientType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientName sets the ClientName field's value.
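+//
+// The Set* methods return the receiver, so an input can be built by chaining.
+// A sketch (the client name is a placeholder; per the field docs above, the
+// service accepts only "public" as the client type):
+//
+// in := (&RegisterClientInput{}).
+// SetClientName("my-cli-tool").
+// SetClientType("public")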
+func (s *RegisterClientInput) SetClientName(v string) *RegisterClientInput { + s.ClientName = &v + return s +} + +// SetClientType sets the ClientType field's value. +func (s *RegisterClientInput) SetClientType(v string) *RegisterClientInput { + s.ClientType = &v + return s +} + +// SetEntitledApplicationArn sets the EntitledApplicationArn field's value. +func (s *RegisterClientInput) SetEntitledApplicationArn(v string) *RegisterClientInput { + s.EntitledApplicationArn = &v + return s +} + +// SetGrantTypes sets the GrantTypes field's value. +func (s *RegisterClientInput) SetGrantTypes(v []*string) *RegisterClientInput { + s.GrantTypes = v + return s +} + +// SetIssuerUrl sets the IssuerUrl field's value. +func (s *RegisterClientInput) SetIssuerUrl(v string) *RegisterClientInput { + s.IssuerUrl = &v + return s +} + +// SetRedirectUris sets the RedirectUris field's value. +func (s *RegisterClientInput) SetRedirectUris(v []*string) *RegisterClientInput { + s.RedirectUris = v + return s +} + +// SetScopes sets the Scopes field's value. +func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput { + s.Scopes = v + return s +} + +type RegisterClientOutput struct { + _ struct{} `type:"structure"` + + // An endpoint that the client can use to request authorization. + AuthorizationEndpoint *string `locationName:"authorizationEndpoint" type:"string"` + + // The unique identifier string for each client. This client uses this identifier + // to get authenticated by the service in subsequent calls. + ClientId *string `locationName:"clientId" type:"string"` + + // Indicates the time at which the clientId and clientSecret were issued. + ClientIdIssuedAt *int64 `locationName:"clientIdIssuedAt" type:"long"` + + // A secret string generated for the client. The client will use this string + // to get authenticated by the service in subsequent calls. + // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RegisterClientOutput's + // String and GoString methods. + ClientSecret *string `locationName:"clientSecret" type:"string" sensitive:"true"` + + // Indicates the time at which the clientId and clientSecret will become invalid. + ClientSecretExpiresAt *int64 `locationName:"clientSecretExpiresAt" type:"long"` + + // An endpoint that the client can use to create tokens. + TokenEndpoint *string `locationName:"tokenEndpoint" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientOutput) GoString() string { + return s.String() +} + +// SetAuthorizationEndpoint sets the AuthorizationEndpoint field's value. +func (s *RegisterClientOutput) SetAuthorizationEndpoint(v string) *RegisterClientOutput { + s.AuthorizationEndpoint = &v + return s +} + +// SetClientId sets the ClientId field's value. +func (s *RegisterClientOutput) SetClientId(v string) *RegisterClientOutput { + s.ClientId = &v + return s +} + +// SetClientIdIssuedAt sets the ClientIdIssuedAt field's value. 
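+//
+// ClientIdIssuedAt and ClientSecretExpiresAt are Unix epoch timestamps in
+// seconds (note their `type:"long"` tags); a sketch of converting one for
+// display, where out is a *RegisterClientOutput:
+//
+// expires := time.Unix(aws.Int64Value(out.ClientSecretExpiresAt), 0)
+// fmt.Println("client secret expires at", expires.UTC())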
+func (s *RegisterClientOutput) SetClientIdIssuedAt(v int64) *RegisterClientOutput {
+ s.ClientIdIssuedAt = &v
+ return s
+}
+
+// SetClientSecret sets the ClientSecret field's value.
+func (s *RegisterClientOutput) SetClientSecret(v string) *RegisterClientOutput {
+ s.ClientSecret = &v
+ return s
+}
+
+// SetClientSecretExpiresAt sets the ClientSecretExpiresAt field's value.
+func (s *RegisterClientOutput) SetClientSecretExpiresAt(v int64) *RegisterClientOutput {
+ s.ClientSecretExpiresAt = &v
+ return s
+}
+
+// SetTokenEndpoint sets the TokenEndpoint field's value.
+func (s *RegisterClientOutput) SetTokenEndpoint(v string) *RegisterClientOutput {
+ s.TokenEndpoint = &v
+ return s
+}
+
+// Indicates that the client is making requests more frequently than the service
+// can handle.
+type SlowDownException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be slow_down.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SlowDownException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SlowDownException) GoString() string {
+ return s.String()
+}
+
+func newErrorSlowDownException(v protocol.ResponseMetadata) error {
+ return &SlowDownException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *SlowDownException) Code() string {
+ return "SlowDownException"
+}
+
+// Message returns the exception's message.
+func (s *SlowDownException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *SlowDownException) OrigErr() error {
+ return nil
+}
+
+func (s *SlowDownException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *SlowDownException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *SlowDownException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+type StartDeviceAuthorizationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique identifier string for the client that is registered with IAM Identity
+ // Center. This value should come from the persisted result of the RegisterClient
+ // API operation.
+ //
+ // ClientId is a required field
+ ClientId *string `locationName:"clientId" type:"string" required:"true"`
+
+ // A secret string that is generated for the client. This value should come
+ // from the persisted result of the RegisterClient API operation.
+ // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by StartDeviceAuthorizationInput's + // String and GoString methods. + // + // ClientSecret is a required field + ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"` + + // The URL for the Amazon Web Services access portal. For more information, + // see Using the Amazon Web Services access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // in the IAM Identity Center User Guide. + // + // StartUrl is a required field + StartUrl *string `locationName:"startUrl" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartDeviceAuthorizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartDeviceAuthorizationInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientSecret == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSecret")) + } + if s.StartUrl == nil { + invalidParams.Add(request.NewErrParamRequired("StartUrl")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *StartDeviceAuthorizationInput) SetClientId(v string) *StartDeviceAuthorizationInput { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *StartDeviceAuthorizationInput) SetClientSecret(v string) *StartDeviceAuthorizationInput { + s.ClientSecret = &v + return s +} + +// SetStartUrl sets the StartUrl field's value. +func (s *StartDeviceAuthorizationInput) SetStartUrl(v string) *StartDeviceAuthorizationInput { + s.StartUrl = &v + return s +} + +type StartDeviceAuthorizationOutput struct { + _ struct{} `type:"structure"` + + // The short-lived code that is used by the device when polling for a session + // token. + DeviceCode *string `locationName:"deviceCode" type:"string"` + + // Indicates the number of seconds in which the verification code will become + // invalid. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // Indicates the number of seconds the client must wait between attempts when + // polling for a session. + Interval *int64 `locationName:"interval" type:"integer"` + + // A one-time user verification code. This is needed to authorize an in-use + // device. + UserCode *string `locationName:"userCode" type:"string"` + + // The URI of the verification page that takes the userCode to authorize the + // device. + VerificationUri *string `locationName:"verificationUri" type:"string"` + + // An alternate URL that the client can use to automatically launch a browser. 
+ // This process skips the manual step in which the user visits the verification + // page and enters their code. + VerificationUriComplete *string `locationName:"verificationUriComplete" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationOutput) GoString() string { + return s.String() +} + +// SetDeviceCode sets the DeviceCode field's value. +func (s *StartDeviceAuthorizationOutput) SetDeviceCode(v string) *StartDeviceAuthorizationOutput { + s.DeviceCode = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *StartDeviceAuthorizationOutput) SetExpiresIn(v int64) *StartDeviceAuthorizationOutput { + s.ExpiresIn = &v + return s +} + +// SetInterval sets the Interval field's value. +func (s *StartDeviceAuthorizationOutput) SetInterval(v int64) *StartDeviceAuthorizationOutput { + s.Interval = &v + return s +} + +// SetUserCode sets the UserCode field's value. +func (s *StartDeviceAuthorizationOutput) SetUserCode(v string) *StartDeviceAuthorizationOutput { + s.UserCode = &v + return s +} + +// SetVerificationUri sets the VerificationUri field's value. +func (s *StartDeviceAuthorizationOutput) SetVerificationUri(v string) *StartDeviceAuthorizationOutput { + s.VerificationUri = &v + return s +} + +// SetVerificationUriComplete sets the VerificationUriComplete field's value. +func (s *StartDeviceAuthorizationOutput) SetVerificationUriComplete(v string) *StartDeviceAuthorizationOutput { + s.VerificationUriComplete = &v + return s +} + +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +type UnauthorizedClientException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be unauthorized_client. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnauthorizedClientException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s UnauthorizedClientException) GoString() string {
+	return s.String()
+}
+
+func newErrorUnauthorizedClientException(v protocol.ResponseMetadata) error {
+	return &UnauthorizedClientException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *UnauthorizedClientException) Code() string {
+	return "UnauthorizedClientException"
+}
+
+// Message returns the exception's message.
+func (s *UnauthorizedClientException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *UnauthorizedClientException) OrigErr() error {
+	return nil
+}
+
+func (s *UnauthorizedClientException) Error() string {
+	return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *UnauthorizedClientException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *UnauthorizedClientException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// Indicates that the grant type in the request is not supported by the service.
+type UnsupportedGrantTypeException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	// Single error code. For this exception the value will be unsupported_grant_type.
+	Error_ *string `locationName:"error" type:"string"`
+
+	// Human-readable text providing additional information, used to assist the
+	// client developer in understanding the error that occurred.
+	Error_description *string `locationName:"error_description" type:"string"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnsupportedGrantTypeException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnsupportedGrantTypeException) GoString() string {
+	return s.String()
+}
+
+func newErrorUnsupportedGrantTypeException(v protocol.ResponseMetadata) error {
+	return &UnsupportedGrantTypeException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *UnsupportedGrantTypeException) Code() string {
+	return "UnsupportedGrantTypeException"
+}
+
+// Message returns the exception's message.
+func (s *UnsupportedGrantTypeException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *UnsupportedGrantTypeException) OrigErr() error {
+	return nil
+}
+
+func (s *UnsupportedGrantTypeException) Error() string {
+	return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *UnsupportedGrantTypeException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *UnsupportedGrantTypeException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go
new file mode 100644
index 000000000..083568c61
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go
@@ -0,0 +1,67 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package ssooidc provides the client and types for making API
+// requests to AWS SSO OIDC.
+//
+// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a
+// client (such as the CLI or a native application) to register with IAM Identity
+// Center. The service also enables the client to fetch the user’s access
+// token upon successful authentication and authorization with IAM Identity
+// Center.
+//
+// IAM Identity Center uses the sso and identitystore API namespaces.
+//
+// # Considerations for Using This Guide
+//
+// Before you begin using this guide, we recommend that you first review the
+// following important information about how the IAM Identity Center OIDC service
+// works.
+//
+//   - The IAM Identity Center OIDC service currently implements only the portions
+//     of the OAuth 2.0 Device Authorization Grant standard
+//     (https://tools.ietf.org/html/rfc8628) that are necessary to enable single
+//     sign-on authentication with the CLI.
+//
+//   - With older versions of the CLI, the service only emits OIDC access tokens,
+//     so to obtain a new token, users must explicitly re-authenticate. To access
+//     the OIDC flow that supports token refresh and doesn’t require re-authentication,
+//     update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI
+//     V2) with support for OIDC token refresh and configurable IAM Identity
+//     Center session durations. For more information, see Configure Amazon Web
+//     Services access portal session duration (https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html).
+//
+//   - The access tokens provided by this service grant access to all Amazon
+//     Web Services account entitlements assigned to an IAM Identity Center user,
+//     not just a particular application.
+//
+//   - The documentation in this guide does not describe the mechanism to convert
+//     the access token into Amazon Web Services Auth (“sigv4”) credentials
+//     for use with IAM-protected Amazon Web Services service endpoints. For
+//     more information, see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html)
+//     in the IAM Identity Center Portal API Reference Guide.
+//
+// For general information about IAM Identity Center, see What is IAM Identity
+// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
+// in the IAM Identity Center User Guide.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10 for more information on this service.
+//
+// See ssooidc package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/
+//
+// # Using the Client
+//
+// To contact AWS SSO OIDC with the SDK, use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS SSO OIDC client SSOOIDC for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/#New
+package ssooidc
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go
new file mode 100644
index 000000000..cadf4584d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go
@@ -0,0 +1,123 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package ssooidc
+
+import (
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+const (
+
+	// ErrCodeAccessDeniedException for service response error code
+	// "AccessDeniedException".
+	//
+	// You do not have sufficient access to perform this action.
+	ErrCodeAccessDeniedException = "AccessDeniedException"
+
+	// ErrCodeAuthorizationPendingException for service response error code
+	// "AuthorizationPendingException".
+	//
+	// Indicates that a request to authorize a client with an access user session
+	// token is pending.
+	ErrCodeAuthorizationPendingException = "AuthorizationPendingException"
+
+	// ErrCodeExpiredTokenException for service response error code
+	// "ExpiredTokenException".
+	//
+	// Indicates that the token issued by the service is expired and is no longer
+	// valid.
+	ErrCodeExpiredTokenException = "ExpiredTokenException"
+
+	// ErrCodeInternalServerException for service response error code
+	// "InternalServerException".
+	//
+	// Indicates that an error from the service occurred while trying to process
+	// a request.
+	ErrCodeInternalServerException = "InternalServerException"
+
+	// ErrCodeInvalidClientException for service response error code
+	// "InvalidClientException".
+	//
+	// Indicates that the clientId or clientSecret in the request is invalid. For
+	// example, this can occur when a client sends an incorrect clientId or an expired
+	// clientSecret.
+	ErrCodeInvalidClientException = "InvalidClientException"
+
+	// ErrCodeInvalidClientMetadataException for service response error code
+	// "InvalidClientMetadataException".
+	//
+	// Indicates that the client information sent in the request during registration
+	// is invalid.
+	ErrCodeInvalidClientMetadataException = "InvalidClientMetadataException"
+
+	// ErrCodeInvalidGrantException for service response error code
+	// "InvalidGrantException".
+	//
+	// Indicates that a request contains an invalid grant. This can occur if a client
+	// makes a CreateToken request with an invalid grant type.
+	ErrCodeInvalidGrantException = "InvalidGrantException"
+
+	// ErrCodeInvalidRedirectUriException for service response error code
+	// "InvalidRedirectUriException".
+	//
+	// Indicates that one or more redirect URIs in the request are not supported
+	// for this operation.
+	ErrCodeInvalidRedirectUriException = "InvalidRedirectUriException"
+
+	// ErrCodeInvalidRequestException for service response error code
+	// "InvalidRequestException".
+	//
+	// Indicates that something is wrong with the input to the request. For example,
+	// a required parameter might be missing or out of range.
+	ErrCodeInvalidRequestException = "InvalidRequestException"
+
+	// ErrCodeInvalidRequestRegionException for service response error code
+	// "InvalidRequestRegionException".
+	//
+	// Indicates that a token provided as input to the request was issued by and
+	// is only usable by calling IAM Identity Center endpoints in another region.
+	ErrCodeInvalidRequestRegionException = "InvalidRequestRegionException"
+
+	// ErrCodeInvalidScopeException for service response error code
+	// "InvalidScopeException".
+	//
+	// Indicates that the scope provided in the request is invalid.
+	ErrCodeInvalidScopeException = "InvalidScopeException"
+
+	// ErrCodeSlowDownException for service response error code
+	// "SlowDownException".
+	//
+	// Indicates that the client is making requests more frequently than the
+	// service can handle.
+	ErrCodeSlowDownException = "SlowDownException"
+
+	// ErrCodeUnauthorizedClientException for service response error code
+	// "UnauthorizedClientException".
+	//
+	// Indicates that the client is not currently authorized to make the request.
+	// This can happen when a clientId is not issued for a public client.
+	ErrCodeUnauthorizedClientException = "UnauthorizedClientException"
+
+	// ErrCodeUnsupportedGrantTypeException for service response error code
+	// "UnsupportedGrantTypeException".
+	//
+	// Indicates that the grant type in the request is not supported by the service.
+	ErrCodeUnsupportedGrantTypeException = "UnsupportedGrantTypeException"
+)
+
+var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
+	"AccessDeniedException":          newErrorAccessDeniedException,
+	"AuthorizationPendingException":  newErrorAuthorizationPendingException,
+	"ExpiredTokenException":          newErrorExpiredTokenException,
+	"InternalServerException":        newErrorInternalServerException,
+	"InvalidClientException":         newErrorInvalidClientException,
+	"InvalidClientMetadataException": newErrorInvalidClientMetadataException,
+	"InvalidGrantException":          newErrorInvalidGrantException,
+	"InvalidRedirectUriException":    newErrorInvalidRedirectUriException,
+	"InvalidRequestException":        newErrorInvalidRequestException,
+	"InvalidRequestRegionException":  newErrorInvalidRequestRegionException,
+	"InvalidScopeException":          newErrorInvalidScopeException,
+	"SlowDownException":              newErrorSlowDownException,
+	"UnauthorizedClientException":    newErrorUnauthorizedClientException,
+	"UnsupportedGrantTypeException":  newErrorUnsupportedGrantTypeException,
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go
new file mode 100644
index 000000000..782bae369
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go
@@ -0,0 +1,106 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package ssooidc
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// SSOOIDC provides the API operation methods for making requests to
+// AWS SSO OIDC. See this package's package overview docs
+// for details on the service.
+//
+// SSOOIDC methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
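+//
+// As an illustrative sketch only (the region, client name, and client type
+// below are placeholder assumptions, not values required by this package),
+// a client can be constructed from a session and used to register a client:
+//
+//	sess := session.Must(session.NewSession())
+//	svc := ssooidc.New(sess, aws.NewConfig().WithRegion("us-east-1"))
+//
+//	reg, err := svc.RegisterClient(&ssooidc.RegisterClientInput{
+//		ClientName: aws.String("example-client"), // placeholder name
+//		ClientType: aws.String("public"),         // placeholder type
+//	})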
+type SSOOIDC struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "SSO OIDC" // Name of service. + EndpointsID = "oidc" // ID to lookup a service endpoint with. + ServiceID = "SSO OIDC" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the SSOOIDC client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// +// mySession := session.Must(session.NewSession()) +// +// // Create a SSOOIDC client from just a session. +// svc := ssooidc.New(mySession) +// +// // Create a SSOOIDC client with additional configuration +// svc := ssooidc.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSOOIDC { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "sso-oauth" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSOOIDC { + svc := &SSOOIDC{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2019-06-10", + ResolvedRegion: resolvedRegion, + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SSOOIDC operation and runs any +// custom request initialization. +func (c *SSOOIDC) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go new file mode 100644 index 000000000..2c395f5f6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -0,0 +1,3553 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssumeRole = "AssumeRole" + +// AssumeRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRole operation. 
The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRole for more information on using the AssumeRole
+// API call and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//	// Example sending a request using the AssumeRoleRequest method.
+//	req, resp := client.AssumeRoleRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
+	op := &request.Operation{
+		Name:       opAssumeRole,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssumeRoleInput{}
+	}
+
+	output = &AssumeRoleOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// AssumeRole API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials that you can use to access
+// Amazon Web Services resources. These temporary credentials consist of an
+// access key ID, a secret access key, and a security token. Typically, you
+// use AssumeRole within your account or for cross-account access. For a comparison
+// of AssumeRole with other API operations that produce temporary credentials,
+// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// # Permissions
+//
+// The temporary security credentials created by AssumeRole can be used to make
+// API calls to any Amazon Web Services service with the following exception:
+// You cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken
+// API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policy Amazon
+// Resource Names (ARNs) to use as managed session policies. The plaintext that
+// you use for both inline and managed session policies can't exceed 2,048 characters.
+// Passing policies to this operation returns new temporary credentials. The
+// resulting session's permissions are the intersection of the role's identity-based
+// policy and the session policies. You can use the role's temporary credentials
+// in subsequent Amazon Web Services API calls to access resources in the account
+// that owns the role. You cannot use session policies to grant more permissions
+// than those allowed by the identity-based policy of the role that is being
+// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide.
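+//
+// As a hedged sketch (the svc client, role ARN, session name, and the
+// sessionPolicyJSON variable below are placeholder assumptions), a single
+// inline session policy is supplied through the Policy field of AssumeRoleInput:
+//
+//	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
+//		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"),
+//		RoleSessionName: aws.String("example-session"),
+//		Policy:          aws.String(sessionPolicyJSON), // inline session policy
+//	})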
+//
+// When you create a role, you create two policies: a role trust policy that
+// specifies who can assume the role, and a permissions policy that specifies
+// what can be done with the role. You specify the trusted principal that is
+// allowed to assume the role in the role trust policy.
+//
+// To assume a role from a different account, your Amazon Web Services account
+// must be trusted by the role. The trust relationship is defined in the role's
+// trust policy when the role is created. That trust policy states which accounts
+// are allowed to delegate that access to users in the account.
+//
+// A user who wants to access a role in a different account must also have permissions
+// that are delegated from the account administrator. The administrator must
+// attach a policy that allows the user to call AssumeRole for the ARN of the
+// role in the other account.
+//
+// To allow a user to assume a role in the same account, you can do either of
+// the following:
+//
+//   - Attach a policy to the user that allows the user to call AssumeRole
+//     (as long as the role's trust policy trusts the account).
+//
+//   - Add the user as a principal directly in the role's trust policy.
+//
+// You can do either because the role’s trust policy acts as an IAM resource-based
+// policy. When a resource-based policy grants access to a principal in the
+// same account, no additional identity-based policy is required. For more information
+// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
+// in the IAM User Guide.
+//
+// # Tags
+//
+// (Optional) You can pass tag key-value pairs to your session. These tags are
+// called session tags. For more information about session tags, see Passing
+// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// An administrator must grant you the permissions necessary to pass session
+// tags. The administrator can also create granular permissions to allow you
+// to pass only specific session tags. For more information, see Tutorial: Using
+// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
+// in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during
+// role chaining. For more information, see Chaining Roles with Session Tags
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+// in the IAM User Guide.
+//
+// # Using MFA with AssumeRole
+//
+// (Optional) You can include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios to ensure
+// that the user that assumes the role has been authenticated with an Amazon
+// Web Services MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication. If the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication
+// might look like the following example.
+//
+// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the IAM User Guide.
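+//
+// As an illustrative sketch of the two parameters described next (the MFA
+// serial number and one-time code below are placeholder assumptions):
+//
+//	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
+//		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"),
+//		RoleSessionName: aws.String("example-session"),
+//		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"),
+//		TokenCode:       aws.String("123456"), // current TOTP from the device
+//	})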
+// +// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode +// parameters. The SerialNumber value identifies the user's hardware or virtual +// MFA device. The TokenCode is the time-based one-time password (TOTP) that +// the MFA device produces. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRole for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// - ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + return out, req.Send() +} + +// AssumeRoleWithContext is the same as AssumeRole with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+	return out, req.Send()
+}
+
+const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
+
+// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithSAML operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML
+// API call and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//	// Example sending a request using the AssumeRoleWithSAMLRequest method.
+//	req, resp := client.AssumeRoleWithSAMLRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
+	op := &request.Operation{
+		Name:       opAssumeRoleWithSAML,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssumeRoleWithSAMLInput{}
+	}
+
+	output = &AssumeRoleWithSAMLOutput{}
+	req = c.newRequest(op, input, output)
+	req.Config.Credentials = credentials.AnonymousCredentials
+	return
+}
+
+// AssumeRoleWithSAML API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// via a SAML authentication response. This operation provides a mechanism for
+// tying an enterprise identity store or directory to role-based Amazon Web
+// Services access without user-specific credentials or configuration. For a
+// comparison of AssumeRoleWithSAML with the other API operations that produce
+// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this operation consist of
+// an access key ID, a secret access key, and a security token. Applications
+// can use these temporary security credentials to sign calls to Amazon Web
+// Services services.
+//
+// # Session Duration
+//
+// By default, the temporary security credentials created by AssumeRoleWithSAML
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. Your role session lasts for the
+// duration that you specify, or until the time specified in the SAML authentication
+// response's SessionNotOnOrAfter value, whichever is shorter. You can provide
+// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
+// duration setting for the role. This setting can have a value from 1 hour
+// to 12 hours. To learn how to view the maximum value for your role, see View
+// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide.
The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) +// limits your CLI or Amazon Web Services API role session to a maximum of one +// hour. When you use the AssumeRole API operation to assume a role, you can +// specify the duration of your role session with the DurationSeconds parameter. +// You can specify a parameter value of up to 43200 seconds (12 hours), depending +// on the maximum session duration setting for your role. However, if you assume +// a role using role chaining and provide a DurationSeconds parameter value +// greater than one hour, the operation fails. +// +// # Permissions +// +// The temporary security credentials created by AssumeRoleWithSAML can be used +// to make API calls to any Amazon Web Services service with the following exception: +// you cannot call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services +// security credentials. The identity of the caller is validated by using keys +// in the metadata document that is uploaded for the SAML provider entity for +// your identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. +// The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the persistent +// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// # Tags +// +// (Optional) You can configure your IdP to pass attributes into your SAML assertion +// as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. 
The plaintext session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has +// a separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the +// upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, session tags override the role's tags with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// # SAML Configuration +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by Amazon Web Services. +// Additionally, you must use Identity and Access Management (IAM) to create +// a SAML provider entity in your Amazon Web Services account that represents +// your identity provider. You must also create an IAM role that specifies this +// SAML provider in its trust policy. +// +// For more information, see the following resources: +// +// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithSAML for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. 
+// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry +// the request. +// +// - ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + return out, req.Send() +} + +// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithSAML for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + +// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithWebIdentity operation. 
The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity
+// API call and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//	// Example sending a request using the AssumeRoleWithWebIdentityRequest method.
+//	req, resp := client.AssumeRoleWithWebIdentityRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
+	op := &request.Operation{
+		Name:       opAssumeRoleWithWebIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssumeRoleWithWebIdentityInput{}
+	}
+
+	output = &AssumeRoleWithWebIdentityOutput{}
+	req = c.newRequest(op, input, output)
+	req.Config.Credentials = credentials.AnonymousCredentials
+	return
+}
+
+// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// in a mobile or web application with a web identity provider. Example providers
+// include the OAuth 2.0 providers Login with Amazon and Facebook, or any OpenID
+// Connect-compatible identity provider such as Google or Amazon Cognito federated
+// identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html).
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You can
+// use Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide
+// (http://aws.amazon.com/sdkforios/) and the Amazon Web Services SDK for Android
+// Developer Guide (http://aws.amazon.com/sdkforandroid/) to uniquely identify
+// a user. You can also supply the user with a consistent identity throughout
+// the lifetime of an application.
+//
+// To learn more about Amazon Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html)
+// in the Amazon Cognito Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web
+// Services security credentials. Therefore, you can distribute an application
+// (for example, on mobile devices) that requests temporary security credentials
+// without including long-term Amazon Web Services credentials in the application.
+// You also don't need to deploy server-based proxy services that use long-term
+// Amazon Web Services credentials. Instead, the identity of the caller is validated
+// by using a token from the web identity provider.
For a comparison of AssumeRoleWithWebIdentity +// with the other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. Applications can use these +// temporary security credentials to sign calls to Amazon Web Services service +// API operations. +// +// # Session Duration +// +// By default, the temporary security credentials created by AssumeRoleWithWebIdentity +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// # Permissions +// +// The temporary security credentials created by AssumeRoleWithWebIdentity can +// be used to make API calls to any Amazon Web Services service with the following +// exception: you cannot call the STS GetFederationToken or GetSessionToken +// API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// # Tags +// +// (Optional) You can configure your IdP to pass attributes into your web identity +// token as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. 
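+//
+// As a hedged sketch of the overall call described above (the svc client,
+// role ARN, and the idToken variable holding the provider-issued token are
+// placeholder assumptions; no Amazon Web Services credentials are attached):
+//
+//	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+//		RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-example"),
+//		RoleSessionName:  aws.String("web-session"),
+//		WebIdentityToken: aws.String(idToken),
+//		DurationSeconds:  aws.Int64(3600), // optional, see Session Duration above
+//	})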
+// +// You can pass up to 50 session tags. The plaintext session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has +// a separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the +// upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, the session tag overrides the role tag with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// # Identities +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided web identity token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). +// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). +// Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then +// using those credentials to make a request to Amazon Web Services. 
+// +// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. +// +// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithWebIdentity for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// - ErrCodeIDPCommunicationErrorException "IDPCommunicationError" +// The request could not be fulfilled because the identity provider (IDP) that +// was asked to verify the incoming identity token could not be reached. This +// is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the identity provider might be down or not responding. +// +// - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry +// the request. +// +// - ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. 
+//
+// - ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
+	req, out := c.AssumeRoleWithWebIdentityRequest(input)
+	return out, req.Send()
+}
+
+// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithWebIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) {
+	req, out := c.AssumeRoleWithWebIdentityRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
+
+// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
+// client's request for the DecodeAuthorizationMessage operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the DecodeAuthorizationMessageRequest method.
+// req, resp := client.DecodeAuthorizationMessageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
+	op := &request.Operation{
+		Name:       opDecodeAuthorizationMessage,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DecodeAuthorizationMessageInput{}
+	}
+
+	output = &DecodeAuthorizationMessageOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DecodeAuthorizationMessage API operation for AWS Security Token Service.
+//
+// Decodes additional information about the authorization status of a request
+// from an encoded message returned in response to an Amazon Web Services request.
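+//
+// // Editor's sketch (placeholder values, given an existing *STS client svc):
+// // decoding an encoded authorization message taken from an access-denied
+// // error returned by another Amazon Web Services call.
+// out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
+//     EncodedMessage: aws.String(encodedMessage),
+// })
+// if err == nil {
+//     fmt.Println(aws.StringValue(out.DecodedMessage))
+// }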
+// +// For example, if a user is not authorized to perform an operation that he +// or she has requested, the request returns a Client.UnauthorizedOperation +// response (an HTTP 403 response). Some Amazon Web Services operations additionally +// return an encoded message that can provide details about this authorization +// failure. +// +// Only certain Amazon Web Services operations return an encoded authorization +// message. The documentation for an individual operation indicates whether +// that operation returns an encoded message in addition to returning an HTTP +// code. +// +// The message is encoded because the details of the authorization status can +// contain privileged information that the user who requested the operation +// should not see. To decode an authorization status message, a user must be +// granted permissions through an IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// to request the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) +// action. +// +// The decoded message includes the following type of information: +// +// - Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. For more information, see Determining Whether +// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// +// - The principal who made the request. +// +// - The requested action. +// +// - The requested resource. +// +// - The values of condition keys in the context of the user's request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation DecodeAuthorizationMessage for usage and error information. +// +// Returned Error Codes: +// - ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + return out, req.Send() +} + +// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of +// the ability to pass a context and additional request options. +// +// See DecodeAuthorizationMessage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+	return out, req.Send()
+}
+
+const opGetAccessKeyInfo = "GetAccessKeyInfo"
+
+// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the
+// client's request for the GetAccessKeyInfo operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the GetAccessKeyInfoRequest method.
+// req, resp := client.GetAccessKeyInfoRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
+func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) {
+	op := &request.Operation{
+		Name:       opGetAccessKeyInfo,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetAccessKeyInfoInput{}
+	}
+
+	output = &GetAccessKeyInfoOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetAccessKeyInfo API operation for AWS Security Token Service.
+//
+// Returns the account identifier for the specified access key ID.
+//
+// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE)
+// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY).
+// For more information about access keys, see Managing Access Keys for IAM
+// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)
+// in the IAM User Guide.
+//
+// When you pass an access key ID to this operation, it returns the ID of the
+// Amazon Web Services account to which the keys belong. Access key IDs beginning
+// with AKIA are long-term credentials for an IAM user or the Amazon Web Services
+// account root user. Access key IDs beginning with ASIA are temporary credentials
+// that are created using STS operations. If the account in the response belongs
+// to you, you can sign in as the root user and review your root user access
+// keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html)
+// to learn which IAM user owns the keys. To learn who requested the temporary
+// credentials for an ASIA access key, view the STS events in your CloudTrail
+// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html)
+// in the IAM User Guide.
+//
+// This operation does not indicate the state of the access key. The key might
+// be active, inactive, or deleted. Active keys might not have permissions to
+// perform an operation. Providing a deleted access key might return an error
+// that the key doesn't exist.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetAccessKeyInfo for usage and error information.
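+//
+// // Editor's sketch (the access key ID below is the documentation example
+// // value; svc is an existing *STS client): looking up which account owns
+// // an access key.
+// out, err := svc.GetAccessKeyInfo(&sts.GetAccessKeyInfoInput{
+//     AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+// })
+// if err == nil {
+//     fmt.Println(aws.StringValue(out.Account))
+// }
+//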
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
+func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) {
+	req, out := c.GetAccessKeyInfoRequest(input)
+	return out, req.Send()
+}
+
+// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetAccessKeyInfo for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) {
+	req, out := c.GetAccessKeyInfoRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetCallerIdentity = "GetCallerIdentity"
+
+// GetCallerIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the GetCallerIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetCallerIdentity for more information on using the GetCallerIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the GetCallerIdentityRequest method.
+// req, resp := client.GetCallerIdentityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
+	op := &request.Operation{
+		Name:       opGetCallerIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetCallerIdentityInput{}
+	}
+
+	output = &GetCallerIdentityOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetCallerIdentity API operation for AWS Security Token Service.
+//
+// Returns details about the IAM user or role whose credentials are used to
+// call the operation.
+//
+// No permissions are required to perform this operation. If an administrator
+// attaches a policy to your identity that explicitly denies access to the sts:GetCallerIdentity
+// action, you can still perform this operation. Permissions are not required
+// because the same information is returned when access is denied. To view an
+// example response, see I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
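+//
+// // Editor's sketch (svc is an existing *STS client): GetCallerIdentity takes
+// // an empty input and returns the caller's Account, Arn, and UserId.
+// out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
+// if err == nil {
+//     fmt.Println(aws.StringValue(out.Arn))
+// }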
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetCallerIdentity for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
+	req, out := c.GetCallerIdentityRequest(input)
+	return out, req.Send()
+}
+
+// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetCallerIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
+	req, out := c.GetCallerIdentityRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetFederationToken = "GetFederationToken"
+
+// GetFederationTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetFederationToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetFederationToken for more information on using the GetFederationToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the GetFederationTokenRequest method.
+// req, resp := client.GetFederationTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetFederationToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetFederationTokenInput{}
+	}
+
+	output = &GetFederationTokenOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetFederationToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a user. A typical
+// use is in a proxy application that gets temporary security credentials on
+// behalf of distributed applications inside a corporate network.
+//
+// You must call the GetFederationToken operation using the long-term security
+// credentials of an IAM user. As a result, this call is appropriate in contexts
+// where those credentials can be safeguarded, usually in a server-based application.
+// For a comparison of GetFederationToken with the other API operations that +// produce temporary credentials, see Requesting Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// Although it is possible to call GetFederationToken using the security credentials +// of an Amazon Web Services account root user rather than an IAM user that +// you create for the purpose of a proxy application, we do not recommend it. +// For more information, see Safeguard your root user credentials and don't +// use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) +// in the IAM User Guide. +// +// You can create a mobile-based or browser-based app that can authenticate +// users using a web identity provider like Login with Amazon, Facebook, Google, +// or an OpenID Connect-compatible identity provider. In this case, we recommend +// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. +// For more information, see Federation Through a Web-based Identity Provider +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. +// +// # Session duration +// +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// session duration is 43,200 seconds (12 hours). Temporary credentials obtained +// by using the root user credentials have a maximum duration of 3,600 seconds +// (1 hour). +// +// # Permissions +// +// You can use the temporary credentials created by GetFederationToken in any +// Amazon Web Services service with the following exceptions: +// +// - You cannot call any IAM operations using the CLI or the Amazon Web Services +// API. This limitation does not apply to console sessions. +// +// - You cannot call any STS operations except GetCallerIdentity. +// +// You can use temporary credentials for single sign-on (SSO) to the console. +// +// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. When you pass +// session policies, the session permissions are the intersection of the IAM +// user policies and the session policies that you pass. This gives you a way +// to further restrict the permissions for a federated user. You cannot use +// session policies to grant more permissions than those that are defined in +// the permissions policy of the IAM user. For more information, see Session +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. 
+// For information about using GetFederationToken to
+// create temporary security credentials, see GetFederationToken—Federation
+// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
+//
+// You can use the credentials to access a resource that has a resource-based
+// policy. If that policy specifically references the federated user session
+// in the Principal element of the policy, the session has the permissions allowed
+// by the policy. These permissions are granted in addition to the permissions
+// granted by the session policies.
+//
+// # Tags
+//
+// (Optional) You can pass tag key-value pairs to your session. These are called
+// session tags. For more information about session tags, see Passing Session
+// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// An administrator must grant you the permissions necessary to pass session
+// tags. The administrator can also create granular permissions to allow you
+// to pass only specific session tags. For more information, see Tutorial: Using
+// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
+// in the IAM User Guide.
+//
+// Tag key–value pairs are not case sensitive, but case is preserved. This
+// means that you cannot have separate Department and department tag keys. Assume
+// that the user that you are federating has the Department=Marketing tag and
+// you pass the department=engineering session tag. Department and department
+// are not saved as separate tags, and the session tag passed in the request
+// takes precedence over the user tag.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetFederationToken for usage and error information.
+//
+// Returned Error Codes:
+//
+// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the total packed size of the session policies
+// and session tags combined was too large. An Amazon Web Services conversion
+// compresses the session policy document, session policy ARNs, and session
+// tags into a packed binary format that has a separate limit. The error message
+// indicates by percentage how close the policies and tags are to the upper
+// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// You could receive this error even though you meet other defined session policy
+// and session tag limits. For more information, see IAM and STS Entity Character
+// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
+// in the IAM User Guide.
+//
+// - ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
+	req, out := c.GetFederationTokenRequest(input)
+	return out, req.Send()
+}
+
+// GetFederationTokenWithContext is the same as GetFederationToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetFederationToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) {
+	req, out := c.GetFederationTokenRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetSessionToken = "GetSessionToken"
+
+// GetSessionTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetSessionToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetSessionToken for more information on using the GetSessionToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the GetSessionTokenRequest method.
+// req, resp := client.GetSessionTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetSessionToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetSessionTokenInput{}
+	}
+
+	output = &GetSessionTokenOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetSessionToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary credentials for an Amazon Web Services account
+// or IAM user.
+// The credentials consist of an access key ID, a secret access
+// key, and a security token. Typically, you use GetSessionToken if you want
+// to use MFA to protect programmatic calls to specific Amazon Web Services
+// API operations like Amazon EC2 StopInstances.
+//
+// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that
+// is associated with their MFA device. Using the temporary security credentials
+// that the call returns, IAM users can then make programmatic calls to API
+// operations that require MFA authentication. An incorrect MFA code causes
+// the API to return an access denied error. For a comparison of GetSessionToken
+// with the other API operations that produce temporary credentials, see Requesting
+// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// No permissions are required for users to perform this operation. The purpose
+// of the sts:GetSessionToken operation is to authenticate the user using MFA.
+// You cannot use policies to control authentication operations. For more information,
+// see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html)
+// in the IAM User Guide.
+//
+// # Session Duration
+//
+// The GetSessionToken operation must be called by using the long-term Amazon
+// Web Services security credentials of an IAM user. Credentials that are created
+// by IAM users are valid for the duration that you specify. This duration can
+// range from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36
+// hours), with a default of 43,200 seconds (12 hours). Credentials based on
+// account credentials can range from 900 seconds (15 minutes) up to 3,600 seconds
+// (1 hour), with a default of 1 hour.
+//
+// # Permissions
+//
+// The temporary security credentials created by GetSessionToken can be used
+// to make API calls to any Amazon Web Services service with the following exceptions:
+//
+// - You cannot call any IAM API operations unless MFA authentication information
+// is included in the request.
+//
+// - You cannot call any STS API except AssumeRole or GetCallerIdentity.
+//
+// The credentials that GetSessionToken returns are based on permissions associated
+// with the IAM user whose credentials were used to call the operation. The
+// temporary credentials have the same permissions as the IAM user.
+//
+// Although it is possible to call GetSessionToken using the security credentials
+// of an Amazon Web Services account root user rather than an IAM user, we do
+// not recommend it. If GetSessionToken is called using root user credentials,
+// the temporary credentials have root user permissions. For more information,
+// see Safeguard your root user credentials and don't use them for everyday
+// tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials)
+// in the IAM User Guide.
+//
+// For more information about using GetSessionToken to create temporary credentials,
+// see Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors.
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetSessionToken for usage and error information. +// +// Returned Error Codes: +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + return out, req.Send() +} + +// GetSessionTokenWithContext is the same as GetSessionToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetSessionToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type AssumeRoleInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value specified can range + // from 900 seconds (15 minutes) up to the maximum session duration set for + // the role. The maximum session duration setting can have a value from 1 hour + // to 12 hours. If you specify a value higher than this setting or the administrator + // setting (whichever is lower), the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. + // + // Role chaining limits your Amazon Web Services CLI or Amazon Web Services + // API role session to a maximum of one hour. When you use the AssumeRole API + // operation to assume a role, you can specify the duration of your role session + // with the DurationSeconds parameter. You can specify a parameter value of + // up to 43200 seconds (12 hours), depending on the maximum session duration + // setting for your role. However, if you assume a role using role chaining + // and provide a DurationSeconds parameter value greater than one hour, the + // operation fails. To learn how to view the maximum value for your role, see + // View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. 
The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. A cross-account + // role is usually set up to trust everyone in an account. Therefore, the administrator + // of the trusting account might send an external ID to the administrator of + // the trusted account. That way, only someone with the ID can assume the role, + // rather than everyone in the account. For more information about the external + // ID, see How to Use an External ID When Granting Access to Your Amazon Web + // Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + ExternalId *string `min:"2" type:"string"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent Amazon Web Services API calls + // to access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. 
You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see Amazon + // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent Amazon Web Services API calls to access resources in the account + // that owns the role. You cannot use session policies to grant more permissions + // than those allowed by the identity-based policy of the role that is being + // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // A list of previously acquired trusted context assertions in the format of + // a JSON array. The trusted context assertion is signed and encrypted by Amazon + // Web Services STS. + // + // The following is an example of a ProvidedContext value that includes a single + // trusted context assertion and the ARN of the context provider from which + // the trusted context assertion was generated. + // + // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] + ProvidedContexts []*ProvidedContext `type:"list"` + + // The Amazon Resource Name (ARN) of the role to assume. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests that use the temporary security credentials will expose the role + // session name to the external account in their CloudTrail logs. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. 
+ // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The source identity specified by the principal that is calling the AssumeRole + // operation. + // + // You can require users to specify a source identity when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition + // key to further control access to Amazon Web Services resources based on the + // value of source identity. For more information about using source identity, + // see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@-. You cannot + // use a value that begins with the text aws:. This prefix is reserved for Amazon + // Web Services internal use. + SourceIdentity *string `min:"2" type:"string"` + + // A list of session tags that you want to pass. Each session tag consists of + // a key name and an associated value. For more information about session tags, + // see Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters, and the values can’t exceed + // 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the role. When you do, session tags override a role tag with the same + // key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. 
+ // + // Additionally, if you used temporary credentials to perform this operation, + // the new session inherits any transitive session tags from the calling session. + // If you pass a session tag with the same key as an inherited tag, the operation + // fails. To view the inherited tags for a session, see the CloudTrail logs. + // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) + // in the IAM User Guide. + Tags []*Tag `type:"list"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA. (In other words, if the policy includes a condition + // that tests for MFA). If the role being assumed requires MFA and if the TokenCode + // value is missing or expired, the AssumeRole call returns an "access denied" + // error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` + + // A list of keys for session tags that you want to set as transitive. If you + // set a tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. For more information, see Chaining Roles with Session + // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // in the IAM User Guide. + // + // This parameter is optional. When you set session tags as transitive, the + // session policy and session tags packed binary limit is not affected. + // + // If you choose not to specify a transitive tag key, then no tags are passed + // from this session to any subsequent sessions. + TransitiveTagKeys []*string `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
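+//
+// // Editor's note: the SDK calls Validate automatically before sending a
+// // request; calling it directly (input is a placeholder *AssumeRoleInput)
+// // surfaces parameter errors early.
+// if err := input.Validate(); err != nil {
+//     fmt.Println(err)
+// }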
+func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.SourceIdentity != nil && len(*s.SourceIdentity) < 2 { + invalidParams.Add(request.NewErrParamMinLen("SourceIdentity", 2)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvidedContexts != nil { + for i, v := range s.ProvidedContexts { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ProvidedContexts", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { + s.DurationSeconds = &v + return s +} + +// SetExternalId sets the ExternalId field's value. +func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { + s.ExternalId = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { + s.PolicyArns = v + return s +} + +// SetProvidedContexts sets the ProvidedContexts field's value. +func (s *AssumeRoleInput) SetProvidedContexts(v []*ProvidedContext) *AssumeRoleInput { + s.ProvidedContexts = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { + s.RoleSessionName = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { + s.SerialNumber = &v + return s +} + +// SetSourceIdentity sets the SourceIdentity field's value. 
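+//
+// // Editor's note: each Set* method returns the modified receiver, so field
+// // assignments can be chained (values below are placeholders):
+// input := (&sts.AssumeRoleInput{}).
+//     SetRoleArn("arn:aws:iam::123456789012:role/example").
+//     SetRoleSessionName("example-session")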
+func (s *AssumeRoleInput) SetSourceIdentity(v string) *AssumeRoleInput { + s.SourceIdentity = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { + s.Tags = v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { + s.TokenCode = &v + return s +} + +// SetTransitiveTagKeys sets the TransitiveTagKeys field's value. +func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput { + s.TransitiveTagKeys = v + return s +} + +// Contains the response to a successful AssumeRole request, including temporary +// Amazon Web Services credentials that can be used to make Amazon Web Services +// requests. +type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The source identity specified by the principal that is calling the AssumeRole + // operation. + // + // You can require users to specify a source identity when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition + // key to further control access to Amazon Web Services resources based on the + // value of source identity. For more information about using source identity, + // see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SourceIdentity *string `min:"2" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { + s.AssumedRoleUser = v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { + s.PackedPolicySize = &v + return s +} + +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleOutput) SetSourceIdentity(v string) *AssumeRoleOutput { + s.SourceIdentity = &v + return s +} + +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. Your role session lasts for + // the duration that you specify for the DurationSeconds parameter, or until + // the time specified in the SAML authentication response's SessionNotOnOrAfter + // value, whichever is shorter. You can provide a DurationSeconds value from + // 900 seconds (15 minutes) up to the maximum session duration setting for the + // role. This setting can have a value from 1 hour to 12 hours. If you specify + // a value higher than this setting, the operation fails. For example, if you + // specify a session duration of 12 hours, but your administrator set the maximum + // session duration to 6 hours, your operation fails. To learn how to view the + // maximum value for your role, see View the Maximum Session Duration Setting + // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent Amazon Web Services API calls + // to access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. 
The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see Amazon + // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent Amazon Web Services API calls to access resources in the account + // that owns the role. You cannot use session policies to grant more permissions + // than those allowed by the identity-based policy of the role that is being + // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes + // the IdP. + // + // PrincipalArn is a required field + PrincipalArn *string `min:"20" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // The base64 encoded SAML authentication response provided by the IdP. + // + // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the IAM User Guide. + // + // SAMLAssertion is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by AssumeRoleWithSAMLInput's + // String and GoString methods. + // + // SAMLAssertion is a required field + SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithSAMLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithSAMLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithSAMLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.SAMLAssertion == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) + } + if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { + s.PolicyArns = v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { + s.PrincipalArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { + s.RoleArn = &v + return s +} + +// SetSAMLAssertion sets the SAMLAssertion field's value. +func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { + s.SAMLAssertion = &v + return s +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary Amazon Web Services credentials that can be used to make Amazon +// Web Services requests. +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. 
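+	// A minimal sketch of the call that produces this output, assuming svc is
+	// an STS client and the provider ARN, role ARN, and base64-encoded
+	// assertion are placeholders:
+	//
+	//	out, err := svc.AssumeRoleWithSAML(new(AssumeRoleWithSAMLInput).
+	//		SetPrincipalArn("arn:aws:iam::123456789012:saml-provider/ExampleIdP").
+	//		SetRoleArn("arn:aws:iam::123456789012:role/example").
+	//		SetSAMLAssertion(samlAssertion))
+	//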
+ AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. + Issuer *string `type:"string"` + + // A hash value based on the concatenation of the following: + // + // * The Issuer response value. + // + // * The Amazon Web Services account ID. + // + // * The friendly name (the last part of the ARN) of the SAML provider in + // IAM. + // + // The combination of NameQualifier and Subject can be used to uniquely identify + // a user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value in the SourceIdentity attribute in the SAML assertion. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with + // that user. After the source identity is set, the value cannot be changed. + // It is present in the request for all actions that are taken by the role and + // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your SAML identity provider to use an attribute + // associated with your users, like user name or email, as the source identity + // when calling AssumeRoleWithSAML. You do this by adding an attribute to the + // SAML assertion. For more information about using source identity, see Monitor + // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SourceIdentity *string `min:"2" type:"string"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. If the format includes any other prefix, the format + // is returned with no modifications. 
+ SubjectType *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { + s.Credentials = v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { + s.Issuer = &v + return s +} + +// SetNameQualifier sets the NameQualifier field's value. +func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { + s.NameQualifier = &v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { + s.PackedPolicySize = &v + return s +} + +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleWithSAMLOutput) SetSourceIdentity(v string) *AssumeRoleWithSAMLOutput { + s.SourceIdentity = &v + return s +} + +// SetSubject sets the Subject field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { + s.Subject = &v + return s +} + +// SetSubjectType sets the SubjectType field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { + s.SubjectType = &v + return s +} + +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. 
For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent Amazon Web Services API calls + // to access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see Amazon + // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent Amazon Web Services API calls to access resources in the account + // that owns the role. You cannot use session policies to grant more permissions + // than those allowed by the identity-based policy of the role that is being + // assumed. 
For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The fully qualified host component of the domain name of the OAuth 2.0 identity + // provider. Do not specify this value for an OpenID Connect identity provider. + // + // Currently www.amazon.com and graph.facebook.com are the only supported identity + // providers for OAuth 2.0 access tokens. Do not include URL schemes and port + // numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. Only tokens with + // RSA algorithms (RS256) are supported. + // + // WebIdentityToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by AssumeRoleWithWebIdentityInput's + // String and GoString methods. + // + // WebIdentityToken is a required field + WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
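+// A minimal sketch of a web identity call, assuming svc is an STS client and
+// idToken is an OIDC ID token obtained from the external provider (both
+// placeholders); this operation is called without signing, so no long-term
+// Amazon Web Services credentials are needed:
+//
+//	out, err := svc.AssumeRoleWithWebIdentity(new(AssumeRoleWithWebIdentityInput).
+//		SetRoleArn("arn:aws:iam::123456789012:role/web-example").
+//		SetRoleSessionName("app-user-123").
+//		SetWebIdentityToken(idToken))
+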
+func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { + s.PolicyArns = v + return s +} + +// SetProviderId sets the ProviderId field's value. +func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { + s.ProviderId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { + s.RoleSessionName = &v + return s +} + +// SetWebIdentityToken sets the WebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { + s.WebIdentityToken = &v + return s +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary Amazon Web Services credentials that can be used to make +// Amazon Web Services requests. +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. 
The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The value of the source identity that is returned in the JSON web token (JWT) + // from the identity provider. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with + // that user. After the source identity is set, the value cannot be changed. + // It is present in the request for all actions that are taken by the role and + // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your identity provider to use an attribute associated + // with your users, like user name or email, as the source identity when calling + // AssumeRoleWithWebIdentity. You do this by adding a claim to the JSON web + // token. To learn more about OIDC tokens and claims, see Using Tokens with + // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) + // in the Amazon Cognito Developer Guide. For more information about using source + // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SourceIdentity *string `min:"2" type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. 
+ SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { + s.PackedPolicySize = &v + return s +} + +// SetProvider sets the Provider field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { + s.Provider = &v + return s +} + +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSourceIdentity(v string) *AssumeRoleWithWebIdentityOutput { + s.SourceIdentity = &v + return s +} + +// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { + s.SubjectFromWebIdentityToken = &v + return s +} + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by Amazon Web Services + // when the role is created. + // + // AssumedRoleId is a required field + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { + s.Arn = &v + return s +} + +// SetAssumedRoleId sets the AssumedRoleId field's value. +func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { + s.AssumedRoleId = &v + return s +} + +// Amazon Web Services credentials for API authentication. +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + // + // Expiration is a required field + Expiration *time.Time `type:"timestamp" required:"true"` + + // The secret access key that can be used to sign requests. + // + // SecretAccessKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by Credentials's + // String and GoString methods. + // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true" sensitive:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + // + // SessionToken is a required field + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Credentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *Credentials) SetAccessKeyId(v string) *Credentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Credentials) SetExpiration(v time.Time) *Credentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *Credentials) SetSecretAccessKey(v string) *Credentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *Credentials) SetSessionToken(v string) *Credentials { + s.SessionToken = &v + return s +} + +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. + // + // EncodedMessage is a required field + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
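+// A minimal sketch (assuming this SDK's aws and credentials helper packages)
+// of turning a returned *Credentials into a static provider for follow-up
+// clients; creds stands in for the Credentials field of any output above:
+//
+//	static := credentials.NewStaticCredentials(
+//		aws.StringValue(creds.AccessKeyId),
+//		aws.StringValue(creds.SecretAccessKey),
+//		aws.StringValue(creds.SessionToken))
+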
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncodedMessage sets the EncodedMessage field's value. +func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { + s.EncodedMessage = &v + return s +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an Amazon +// Web Services request. +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // The API returns a response with the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// SetDecodedMessage sets the DecodedMessage field's value. +func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { + s.DecodedMessage = &v + return s +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. + // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // FederatedUserId is a required field + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FederatedUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *FederatedUser) SetArn(v string) *FederatedUser { + s.Arn = &v + return s +} + +// SetFederatedUserId sets the FederatedUserId field's value. +func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { + s.FederatedUserId = &v + return s +} + +type GetAccessKeyInfoInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters + // that can consist of any upper- or lowercase letter or digit. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAccessKeyInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAccessKeyInfoInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccessKeyInfoInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { + s.AccessKeyId = &v + return s +} + +type GetAccessKeyInfoOutput struct { + _ struct{} `type:"structure"` + + // The number used to identify the Amazon Web Services account. + Account *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAccessKeyInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAccessKeyInfoOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { + s.Account = &v + return s +} + +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
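+// A minimal sketch, assuming svc is an STS client and the aws helper package
+// is imported; GetCallerIdentity takes no parameters and is a common way to
+// confirm which principal a set of credentials resolves to:
+//
+//	ident, err := svc.GetCallerIdentity(&GetCallerIdentityInput{})
+//	if err == nil {
+//		fmt.Println(aws.StringValue(ident.Account), aws.StringValue(ident.Arn))
+//	}
+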
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Web Services account ID number of the account that owns or contains + // the calling entity. + Account *string `type:"string"` + + // The Amazon Web Services ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity that is making the call. The values returned are those listed + // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { + s.Account = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { + s.Arn = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { + s.UserId = &v + return s +} + +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using root user credentials are restricted to a maximum of 3,600 seconds + // (one hour). If the specified duration is longer than one hour, the session + // obtained by using root user credentials defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. 
You can + // also include underscores or any of the following characters: =,.@- + // + // Name is a required field + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as a managed session policy. The policies must exist in the same account + // as the IAM user that is requesting federated access. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. The plaintext that + // you use for both inline and managed session policies can't exceed 2,048 characters. + // You can provide up to 10 managed policy ARNs. 
For more information about + // ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // A list of session tags. Each session tag consists of a key name and an associated + // value. For more information about session tags, see Passing Session Tags + // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters and the values can’t exceed + // 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the user you are federating. When you do, session tags override a user + // tag with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. 
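+	// A minimal sketch of attaching session tags with the Tag setters defined
+	// later in this file, assuming input is a *GetFederationTokenInput; the
+	// key and value are placeholders:
+	//
+	//	input.SetTags([]*Tag{
+	//		new(Tag).SetKey("Department").SetValue("Engineering"),
+	//	})
+	//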
+ Tags []*Tag `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { + s.DurationSeconds = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { + s.PolicyArns = v + return s +} + +// SetTags sets the Tags field's value. +func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { + s.Tags = v + return s +} + +// Contains the response to a successful GetFederationToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon +// Web Services requests. +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). 
You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { + s.Credentials = v + return s +} + +// SetFederatedUser sets the FederatedUser field's value. +func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { + s.FederatedUser = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { + s.PackedPolicySize = &v + return s +} + +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions + // for Amazon Web Services account owners are restricted to a maximum of 3,600 + // seconds (one hour). If the duration is longer than one hour, the session + // for Amazon Web Services account owners defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the Amazon Web Services + // Management Console and viewing the user's security credentials. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. If MFA authentication + // is required, the user must provide a code when requesting a set of temporary + // security credentials. 
A user who fails to provide the code receives an "access + // denied" response when requesting resources that require MFA authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { + s.DurationSeconds = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful GetSessionToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon +// Web Services requests. +type GetSessionTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. 
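+//
+// As an illustrative sketch (hypothetical serial number and token code, not
+// part of the generated AWS documentation): every Set* method returns its
+// receiver, so request inputs such as GetSessionTokenInput can be built
+// fluently:
+//
+//    input := (&sts.GetSessionTokenInput{}).
+//        SetDurationSeconds(3600).
+//        SetSerialNumber("arn:aws:iam::123456789012:mfa/user").
+//        SetTokenCode("123456")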
+func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { + s.Credentials = v + return s +} + +// A reference to the IAM managed policy that is passed as a session policy +// for a role session or a federated user session. +type PolicyDescriptorType struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. For more information about ARNs, see Amazon Resource + // Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + Arn *string `locationName:"arn" min:"20" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PolicyDescriptorType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PolicyDescriptorType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PolicyDescriptorType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { + s.Arn = &v + return s +} + +// Contains information about the provided context. This includes the signed +// and encrypted trusted context assertion and the context provider ARN from +// which the trusted context assertion was generated. +type ProvidedContext struct { + _ struct{} `type:"structure"` + + // The signed and encrypted trusted context assertion generated by the context + // provider. The trusted context assertion is signed and encrypted by Amazon + // Web Services STS. + ContextAssertion *string `min:"4" type:"string"` + + // The context provider ARN from which the trusted context assertion was generated. + ProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvidedContext) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvidedContext) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
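+//
+// As an illustrative sketch (hypothetical provider ARN, not part of the
+// generated AWS documentation): the SDK calls Validate before sending a
+// request, but it can also be invoked directly:
+//
+//    pc := (&sts.ProvidedContext{}).SetProviderArn("arn:aws:iam::123456789012:context-provider/example")
+//    if err := pc.Validate(); err != nil {
+//        // handle request.ErrInvalidParams
+//    }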
+func (s *ProvidedContext) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProvidedContext"} + if s.ContextAssertion != nil && len(*s.ContextAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ContextAssertion", 4)) + } + if s.ProviderArn != nil && len(*s.ProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContextAssertion sets the ContextAssertion field's value. +func (s *ProvidedContext) SetContextAssertion(v string) *ProvidedContext { + s.ContextAssertion = &v + return s +} + +// SetProviderArn sets the ProviderArn field's value. +func (s *ProvidedContext) SetProviderArn(v string) *ProvidedContext { + s.ProviderArn = &v + return s +} + +// You can pass custom key-value pair attributes when you assume a role or federate +// a user. These are called session tags. You can then use the session tags +// to control access to resources. For more information, see Tagging Amazon +// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag keys can’t + // exceed 128 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. 
+func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 000000000..d5307fcaa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,11 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 000000000..ea1d9eb0c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,31 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. +// +// Security Token Service (STS) enables you to request temporary, limited-privilege +// credentials for users. This guide provides descriptions of the STS API. For +// more information about using this service, see Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. +// +// See sts package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ +// +// # Using the Client +// +// To contact AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Security Token Service client STS for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New +package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go new file mode 100644 index 000000000..b680bbd5d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -0,0 +1,84 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +const ( + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // The web identity token that was passed is expired or is not valid. Get a + // new identity token from the identity provider and then retry the request. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeIDPCommunicationErrorException for service response error code + // "IDPCommunicationError". + // + // The request could not be fulfilled because the identity provider (IDP) that + // was asked to verify the incoming identity token could not be reached. This + // is often a transient error caused by network conditions. Retry the request + // a limited number of times so that you don't exceed the request rate. If the + // error persists, the identity provider might be down or not responding. 
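+ //
+ // Note: customizations.go in this package appends this code to each
+ // request's RetryErrorCodes via the initRequest hook, so the SDK's default
+ // retryer already treats it as retryable.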
+ ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" + + // ErrCodeIDPRejectedClaimException for service response error code + // "IDPRejectedClaim". + // + // The identity provider (IdP) reported that authentication failed. This might + // be because the claim is invalid. + // + // If this error is returned for the AssumeRoleWithWebIdentity operation, it + // can also mean that the claim has expired or has been explicitly revoked. + ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" + + // ErrCodeInvalidAuthorizationMessageException for service response error code + // "InvalidAuthorizationMessageException". + // + // The error returned if the message passed to DecodeAuthorizationMessage was + // invalid. This can happen if the token contains invalid characters, such as + // linebreaks. + ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" + + // ErrCodeInvalidIdentityTokenException for service response error code + // "InvalidIdentityToken". + // + // The web identity token that was passed could not be validated by Amazon Web + // Services. Get a new identity token from the identity provider and then retry + // the request. + ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocument". + // + // The request was rejected because the policy document was malformed. The error + // message describes the specific error. + ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" + + // ErrCodePackedPolicyTooLargeException for service response error code + // "PackedPolicyTooLarge". + // + // The request was rejected because the total packed size of the session policies + // and session tags combined was too large. An Amazon Web Services conversion + // compresses the session policy document, session policy ARNs, and session + // tags into a packed binary format that has a separate limit. The error message + // indicates by percentage how close the policies and tags are to the upper + // size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // You could receive this error even though you meet other defined session policy + // and session tag limits. For more information, see IAM and STS Entity Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" + + // ErrCodeRegionDisabledException for service response error code + // "RegionDisabledException". + // + // STS is not activated in the requested region for the account that is being + // asked to generate credentials. The account administrator must use the IAM + // console to activate STS in that region. For more information, see Activating + // and Deactivating Amazon Web Services STS in an Amazon Web Services Region + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. 
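+ //
+ // As an illustrative sketch (not part of the generated AWS documentation),
+ // the error codes defined in this file can be matched by unwrapping the
+ // returned error to awserr.Error:
+ //
+ //    if aerr, ok := err.(awserr.Error); ok {
+ //        switch aerr.Code() {
+ //        case sts.ErrCodePackedPolicyTooLargeException:
+ //            // trim session policies or tags and retry
+ //        case sts.ErrCodeRegionDisabledException:
+ //            // activate STS in the target region first
+ //        }
+ //    }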
+ ErrCodeRegionDisabledException = "RegionDisabledException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644
index 000000000..12327d053
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,104 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type STS struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "sts" // Name of service.
+ EndpointsID = ServiceName // ID to lookup a service endpoint with.
+ ServiceID = "STS" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//
+// mySession := session.Must(session.NewSession())
+//
+// // Create a STS client from just a session.
+// svc := sts.New(mySession)
+//
+// // Create a STS client with additional configuration
+// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ if c.SigningNameDerived || len(c.SigningName) == 0 {
+ c.SigningName = EndpointsID
+ // No Fallback
+ }
+ return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *STS {
+ svc := &STS{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ PartitionID: partitionID,
+ Endpoint: endpoint,
+ APIVersion: "2011-06-15",
+ ResolvedRegion: resolvedRegion,
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a STS operation and runs any
+// custom request initialization.
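+//
+// Note: the initRequest hook consulted below is assigned in
+// customizations.go via init(), which marks
+// ErrCodeIDPCommunicationErrorException as retryable on every request.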
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
new file mode 100644
index 000000000..bf06b2e7d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
@@ -0,0 +1,96 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package stsiface
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/sts"
+)
+
+// STSAPI provides an interface to enable mocking the
+// sts.STS service client's API operations,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client easier.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+// // myFunc uses an SDK service client to make a request to
+// // AWS Security Token Service.
+// func myFunc(svc stsiface.STSAPI) bool {
+// // Make svc.AssumeRole request
+// }
+//
+// func main() {
+// sess := session.New()
+// svc := sts.New(sess)
+//
+// myFunc(svc)
+// }
+//
+// In your _test.go file:
+//
+// // Define a mock struct to be used in your unit tests of myFunc.
+// type mockSTSClient struct {
+// stsiface.STSAPI
+// }
+// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
+// // mock response/functionality
+// }
+//
+// func TestMyFunc(t *testing.T) {
+// // Setup Test
+// mockSvc := &mockSTSClient{}
+//
+// myFunc(mockSvc)
+//
+// // Verify myFunc's functionality
+// }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks to satisfy the interfaces.
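+//
+// As an illustrative sketch (not part of the generated documentation), the
+// same embedding pattern covers the context-aware variants:
+//
+// func (m *mockSTSClient) GetCallerIdentityWithContext(ctx aws.Context, in *sts.GetCallerIdentityInput, opts ...request.Option) (*sts.GetCallerIdentityOutput, error) {
+// return &sts.GetCallerIdentityOutput{}, nil
+// }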
+type STSAPI interface { + AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) + AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) + AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput) + + AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error) + AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error) + AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput) + + AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error) + AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error) + AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput) + + DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error) + DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error) + DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput) + + GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error) + GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error) + GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput) + + GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) + GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error) + GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput) + + GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error) + GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error) + GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput) + + GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) + GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error) + GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput) +} + +var _ STSAPI = (*sts.STS)(nil) diff --git a/vendor/github.com/c2fo/vfs/v6/.gitignore b/vendor/github.com/c2fo/vfs/v6/.gitignore new file mode 100644 index 000000000..3ce5adbbd --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/.gitignore @@ -0,0 +1,2 @@ +.idea +vendor diff --git a/vendor/github.com/c2fo/vfs/v6/.golangci.yml b/vendor/github.com/c2fo/vfs/v6/.golangci.yml new file mode 100644 index 000000000..e461b062b --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/.golangci.yml @@ -0,0 +1,765 @@ +# This file contains all available configuration options +# with their default values. + +# options for analysis running +run: + # default concurrency is a available CPU number + #concurrency: 4 + + # timeout for analysis, e.g. 
30s, 5m, default is 1m + timeout: 3m + + # exit code when at least one issue was found, default is 1 + issues-exit-code: 1 + + # include test files or not, default is true + tests: true + + # list of build tags, all linters use it. Default is empty list. + #build-tags: + # - mytag + + # default is true. Enables skipping of directories: + # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ + skip-dirs-use-default: true + + # which files to skip: they will be analyzed, but issues from them + # won't be reported. Default value is empty list, but there is + # no need to include all autogenerated files, we confidently recognize + # autogenerated files. If it's not please let us know. + # "/" will be replaced by current OS file path separator to properly work + # on Windows. +# skip-files: +# - ".*\\.my\\.go$" +# - lib/bad.go + + # by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules": + # If invoked with -mod=readonly, the go command is disallowed from the implicit + # automatic updating of go.mod described above. Instead, it fails when any changes + # to go.mod are needed. This setting is most useful to check that go.mod does + # not need updates, such as in a continuous integration and testing system. + # If invoked with -mod=vendor, the go command assumes that the vendor + # directory holds the correct copies of dependencies and ignores + # the dependency descriptions in go.mod. +# modules-download-mode: readonly|vendor|mod + + # Allow multiple parallel golangci-lint instances running. + # If false (default) - golangci-lint acquires file lock on start. + allow-parallel-runners: false + + +# output configuration options +output: + # colored-line-number|line-number|json|tab|checkstyle|code-climate|junit-xml|github-actions + # default is "colored-line-number" + formats: + - format: colored-line-number + path: stdout + + # print lines of code with issue, default is true + print-issued-lines: true + + # print linter name in the end of issue text, default is true + print-linter-name: true + + # make issues output unique by line, default is true + uniq-by-line: true + + # add a prefix to the output file references; default is no prefix + path-prefix: "" + + # sorts results by: filepath, line and column + sort-results: true + +linters-settings: +# depguard: +# list-type: blacklist +# include-go-root: false +# packages: +# - github.com/sirupsen/logrus +# packages-with-error-message: +# # specify an error message to output when a blacklisted package is used +# - github.com/sirupsen/logrus: "logging is allowed only by logutils.Log" +# +# dupl: +# # tokens count to trigger issue, 150 by default +# threshold: 100 + + errcheck: + # report about not checking of errors in type assertions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: false + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. + check-blank: false + + # [deprecated] comma-separated list of pairs of the form pkg:regex + # the regex is used to ignore names within pkg. (default "fmt:.*"). 
+ # see https://github.com/kisielk/errcheck#the-deprecated-method for details +# ignore: fmt:.*,io/ioutil:^Read.* + + # path to a file containing a list of functions to exclude from checking + # see https://github.com/kisielk/errcheck#excluding-functions for details +# exclude: /path/to/file.txt + +# errorlint: +# # Check whether fmt.Errorf uses the %w verb for formatting errors. See the readme for caveats +# errorf: true +# # Check for plain type assertions and type switches +# asserts: true +# # Check for plain error comparisons +# comparison: true +# +# exhaustive: +# # check switch statements in generated files also +# check-generated: false +# # indicates that switch statements are to be considered exhaustive if a +# # 'default' case is present, even if all enum members aren't listed in the +# # switch +# default-signifies-exhaustive: false +# +# exhaustivestruct: +# # Struct Patterns is list of expressions to match struct packages and names +# # The struct packages have the form example.com/package.ExampleStruct +# # The matching patterns can use matching syntax from https://pkg.go.dev/path#Match +# # If this list is empty, all structs are tested. +# struct-patterns: +# - '*.Test' +# - 'example.com/package.ExampleStruct' +# +# forbidigo: +# # Forbid the following identifiers (identifiers are written using regexp): +# forbid: +# - ^print.*$ +# - 'fmt\.Print.*' +# # Exclude godoc examples from forbidigo checks. Default is true. +# exclude_godoc_examples: false +# +# funlen: +# lines: 100 +# statements: 50 + + gci: + # Section configuration to compare against. + # Section names are case-insensitive and may contain parameters in (). + # The default order of sections is `standard > default > custom > blank > dot > alias > localmodule`, + # If `custom-order` is `true`, it follows the order of `sections` option. + # Default: ["standard", "default"] + sections: + - standard # Standard section: captures all standard packages. + - default # Default section: contains all imports that could not be matched to another section type. + - prefix(github.com/c2fo/) # Custom section: groups all imports with the specified Prefix. +# - blank # Blank section: contains all blank imports. This section is not present unless explicitly enabled. +# - dot # Dot section: contains all dot imports. This section is not present unless explicitly enabled. +# - alias # Alias section: contains all alias imports. This section is not present unless explicitly enabled. +# - localmodule # Local module section: contains all local packages. This section is not present unless explicitly enabled. + + # Skip generated files. + # Default: true + skip-generated: true + + # Enable custom order of sections. + # If `true`, make the section order the same as the order of `sections`. + # Default: false + custom-order: false + +# gocognit: +# # minimal code complexity to report, 30 by default (but we recommend 10-20) +# min-complexity: 10 +# +# nestif: +# # minimal complexity of if statements to report, 5 by default +# min-complexity: 4 +# +# goconst: +# # minimal length of string constant, 3 by default +# min-len: 3 +# # minimal occurrences count to trigger, 3 by default +# min-occurrences: 2 + + gocritic: + # Which checks should be enabled; can't be combined with 'disabled-checks'; + # See https://go-critic.github.io/overview#checks-overview + # To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run` + # By default list of stable checks is used. 
+# enabled-checks: +# - rangeValCopy + + # Which checks should be disabled; can't be combined with 'enabled-checks'; default is empty + disabled-checks: + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - whyNoLint + - wrapperFunc + - hugeParam + # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint run` to see all tags and checks. + # Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags". + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + # Settings passed to gocritic. + # The settings key is the name of a supported gocritic checker. + # The list of supported checkers can be find in https://go-critic.github.io/overview. + settings: + captLocal: # must be valid enabled check name + # whether to restrict checker to params only (default true) + paramsOnly: true + elseif: + # whether to skip balanced if-else pairs (default true) + skipBalanced: true +# hugeParam: + # size in bytes that makes the warning trigger (default 80) +# sizeThreshold: 80 + nestingReduce: + # min number of statements inside a branch to trigger a warning (default 5) + bodyWidth: 5 + rangeExprCopy: + # size in bytes that makes the warning trigger (default 512) + sizeThreshold: 512 + # whether to check test functions (default true) + skipTestFuncs: true + rangeValCopy: + # size in bytes that makes the warning trigger (default 128) + sizeThreshold: 128 + # whether to check test functions (default true) + skipTestFuncs: true + truncateCmp: + # whether to skip int/uint/uintptr types (default true) + skipArchDependent: true + underef: + # whether to skip (*x).method() calls where x is a pointer receiver (default true) + skipRecvDeref: true + unnamedResult: + # whether to check exported functions + checkExported: true + + gocyclo: + # minimal code complexity to report, 30 by default (but we recommend 10-20) + min-complexity: 15 + +# godot: +# # comments to be checked: `declarations`, `toplevel`, or `all` +# scope: declarations +# # list of regexps for excluding particular comment lines from check +# exclude: +# # example: exclude comments which contain numbers +# # - '[0-9]+' +# # check that each sentence starts with a capital letter +# capital: false +# +# godox: +# # report any comments starting with keywords, this is useful for TODO or FIXME comments that +# # might be left in the code accidentally and should be resolved before merging +# keywords: # default keywords are TODO, BUG, and FIXME, these can be overwritten by this setting +# - NOTE +# - OPTIMIZE # marks code that should be optimized before merging +# - HACK # marks hack-arounds that should be removed before merging + + gofmt: + # simplify code: gofmt with `-s` option, true by default + simplify: true + +# gofumpt: +# # Select the Go version to target. The default is `1.15`. +# lang-version: "1.15" +# +# # Choose whether or not to use the extra rules that are disabled +# # by default +# extra-rules: false +# +# goheader: +# values: +# const: +# # define here const type values in format k:v, for example: +# # COMPANY: MY COMPANY +# regexp: +# # define here regexp type values, for example +# # AUTHOR: .*@mycompany\.com +# template: # |- +# # put here copyright header template for source code files, for example: +# # Note: {{ YEAR }} is a builtin value that returns the year relative to the current machine time. 
+# # +# # {{ AUTHOR }} {{ COMPANY }} {{ YEAR }} +# # SPDX-License-Identifier: Apache-2.0 +# +# # Licensed under the Apache License, Version 2.0 (the "License"); +# # you may not use this file except in compliance with the License. +# # You may obtain a copy of the License at: +# +# # http://www.apache.org/licenses/LICENSE-2.0 +# +# # Unless required by applicable law or agreed to in writing, software +# # distributed under the License is distributed on an "AS IS" BASIS, +# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# # See the License for the specific language governing permissions and +# # limitations under the License. +# template-path: +# # also as alternative of directive 'template' you may put the path to file with the template source + + goimports: + # put imports beginning with prefix after 3rd-party packages; + # it's a comma-separated list of prefixes + local-prefixes: github.com/c2fo/ + +# golint: +# # minimal confidence for issues, default is 0.8 +# min-confidence: 0.8 + +# gomnd: +# settings: +# mnd: +# # the list of enabled checks, see https://github.com/tommy-muehle/go-mnd/#checks for description. +# #checks: argument,case,condition,operation,return,assign +# checks: argument,case,condition,return +# # ignored-numbers: 1000 +# # ignored-files: magic_.*.go +# # ignored-functions: math.* +# +# gomoddirectives: +# # Allow local `replace` directives. Default is false. +# replace-local: false +# # List of allowed `replace` directives. Default is empty. +# replace-allow-list: +# - launchpad.net/gocheck +# # Allow to not explain why the version has been retracted in the `retract` directives. Default is false. +# retract-allow-no-explanation: false +# # Forbid the use of the `exclude` directives. Default is false. +# exclude-forbidden: false +# +# gomodguard: +# allowed: +# modules: # List of allowed modules +# # - gopkg.in/yaml.v2 +# domains: # List of allowed module domains +# # - golang.org +# blocked: +# modules: # List of blocked modules +# # - github.com/uudashr/go-module: # Blocked module +# # recommendations: # Recommended modules that should be used instead (Optional) +# # - golang.org/x/mod +# # reason: "`mod` is the official go.mod parser library." # Reason why the recommended module should be used (Optional) +# versions: # List of blocked module version constraints +# # - github.com/mitchellh/go-homedir: # Blocked module with version constraint +# # version: "< 1.1.0" # Version constraint, see https://github.com/Masterminds/semver#basic-comparisons +# # reason: "testing if blocked version constraint works." # Reason why the version constraint exists. (Optional) +# local_replace_directives: false + +# gosec: +# # To select a subset of rules to run. +# # Available rules: https://github.com/securego/gosec#available-rules +# includes: +# - G401 +# - G306 +# - G101 +# # To specify a set of rules to explicitly exclude. +# # Available rules: https://github.com/securego/gosec#available-rules +# excludes: +# - G204 +# # To specify the configuration of rules. 
+# # The configuration of rules is not fully documented by gosec: +# # https://github.com/securego/gosec#configuration +# # https://github.com/securego/gosec/blob/569328eade2ccbad4ce2d0f21ee158ab5356a5cf/rules/rulelist.go#L60-L102 +# config: +# G306: "0600" +# G101: +# pattern: "(?i)example" +# ignore_entropy: false +# entropy_threshold: "80.0" +# per_char_threshold: "3.0" +# truncate: "32" + gosec: + excludes: + # Flags for potentially-unsafe casting of ints, similar problem to globally-disabled G103 + - G115 + + gosimple: + # Select the Go version to target. The default is '1.13'. + go: "1.21" + # https://staticcheck.io/docs/options#checks + checks: [ "all", "-ST1003" ] + +# govet: +# # report about shadowed variables +# check-shadowing: true +# +# # settings per analyzer +# settings: +# printf: # analyzer name, run `go tool vet help` to see all analyzers +# funcs: # run `go tool vet help printf` to see available settings for `printf` analyzer +# - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof +# - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf +# - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf +# - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf +# +# # enable or disable analyzers by name +# # run `go tool vet help` to see all analyzers +# enable: +# - atomicalign +# enable-all: false +# disable: +# - shadow +# disable-all: false +# +# ifshort: +# # Maximum length of variable declaration measured in number of lines, after which linter won't suggest using short syntax. +# # Has higher priority than max-decl-chars. +# max-decl-lines: 1 +# # Maximum length of variable declaration measured in number of characters, after which linter won't suggest using short syntax. +# max-decl-chars: 30 +# +# importas: +# # if set to `true`, force to use alias. +# no-unaliased: true +# # List of aliases +# alias: +# # using `servingv1` alias for `knative.dev/serving/pkg/apis/serving/v1` package +# - pkg: knative.dev/serving/pkg/apis/serving/v1 +# alias: servingv1 +# # using `autoscalingv1alpha1` alias for `knative.dev/serving/pkg/apis/autoscaling/v1alpha1` package +# - pkg: knative.dev/serving/pkg/apis/autoscaling/v1alpha1 +# alias: autoscalingv1alpha1 +# # You can specify the package path by regular expression, +# # and alias by regular expression expansion syntax like below. +# # see https://github.com/julz/importas#use-regular-expression for details +# - pkg: knative.dev/serving/pkg/apis/(\w+)/(v[\w\d]+) +# alias: $1$2 + + lll: + # max line length, lines longer will be reported. Default is 120. + # '\t' is counted as 1 character by default, and can be changed with the tab-width option + line-length: 140 + # tab width in spaces. Default to 1. + tab-width: 1 + +# makezero: +# # Allow only slices initialized with a length of zero. Default is false. +# always: false + + maligned: + # print struct with more effective memory layout or not, false by default + suggest-new: true + + misspell: + # Correct spellings using locale preferences for US or UK. + # Default is to use a neutral variety of English. + # Setting locale to US will correct the British spelling of 'colour' to 'color'. + locale: US +# ignore-words: +# - someword + +# nakedret: +# # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 +# max-func-lines: 30 +# +# prealloc: +# # XXX: we don't recommend using this linter before doing performance profiling. +# # For most programs usage of prealloc will be a premature optimization. 
+# +# # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them. +# # True by default. +# simple: true +# range-loops: true # Report preallocation suggestions on range loops, true by default +# for-loops: false # Report preallocation suggestions on for loops, false by default +# +# promlinter: +# # Promlinter cannot infer all metrics name in static analysis. +# # Enable strict mode will also include the errors caused by failing to parse the args. +# strict: false +# # Please refer to https://github.com/yeya24/promlinter#usage for detailed usage. +# disabled-linters: +# # - "Help" +# # - "MetricUnits" +# # - "Counter" +# # - "HistogramSummaryReserved" +# # - "MetricTypeInName" +# # - "ReservedChars" +# # - "CamelCase" +# # - "lintUnitAbbreviations" +# +# predeclared: +# # comma-separated list of predeclared identifiers to not report on +# ignore: "" +# # include method names and field names (i.e., qualified names) in checks +# q: false + + nolintlint: + # Enable to ensure that nolint directives are all used. Default is true. + allow-unused: false + # Disable to ensure that nolint directives don't have a leading space. Default is true. + allow-leading-space: true + # Exclude following linters from requiring an explanation. Default is []. + allow-no-explanation: [] + # Enable to require an explanation of nonzero length after each nolint directive. Default is false. + require-explanation: false + # Enable to require nolint directives to mention the specific linter being suppressed. Default is false. + require-specific: false + + revive: + # see https://github.com/mgechev/revive#available-rules for details. + ignore-generated-header: true + severity: warning +# rules: +# - name: indent-error-flow +# severity: warning +# - name: add-constant +# severity: warning +# arguments: +# - maxLitCount: "3" +# allowStrs: '""' +# allowInts: "0,1,2" +# allowFloats: "0.0,0.,1.0,1.,2.0,2." + + staticcheck: + # Select the Go version to target. The default is '1.13'. + go: "1.21" + # https://staticcheck.io/docs/options#checks + checks: [ "all", "-ST1003" ] + + stylecheck: + # Select the Go version to target. The default is '1.13'. + go: "1.21" + # https://staticcheck.io/docs/options#checks + checks: [ "all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022" ] +# # https://staticcheck.io/docs/options#dot_import_whitelist +# dot-import-whitelist: +# - fmt +# # https://staticcheck.io/docs/options#initialisms +# initialisms: [ "ACL", "API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "QPS", "RAM", "RPC", "SLA", "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", "UDP", "UI", "GID", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", "XSS" ] +# # https://staticcheck.io/docs/options#http_status_code_whitelist +# http-status-code-whitelist: [ "200", "400", "404", "500" ] + +# tagliatelle: +# # check the struck tag name case +# case: +# # use the struct field name to check the name of the struct tag +# use-field-name: true +# rules: +# # any struct tag type can be used. +# # support string case: `camel`, `pascal`, `kebab`, `snake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower` +# json: camel +# yaml: camel +# xml: camel +# bson: camel +# avro: snake +# mapstructure: kebab +# +# testpackage: +# # regexp pattern to skip files +# skip-regexp: (export|internal)_test\.go +# +# thelper: +# # The following configurations enable all checks. 
It can be omitted because all checks are enabled by default. +# # You can enable only required checks deleting unnecessary checks. +# test: +# first: true +# name: true +# begin: true +# benchmark: +# first: true +# name: true +# begin: true +# tb: +# first: true +# name: true +# begin: true + + unparam: + # Inspect exported functions, default is false. Set to true if no external program/library imports your code. + # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors: + # if it's called for subdir of a project it can't find external interfaces. All text editor integrations + # with golangci-lint call it on a directory with the changed file. + check-exported: false + + unused: + # Select the Go version to target. The default is '1.13'. + go: "1.21" + +# whitespace: +# multi-if: false # Enforces newlines (or comments) after every multi-line if statement +# multi-func: false # Enforces newlines (or comments) after every multi-line function signature +# +# wrapcheck: +# # An array of strings that specify substrings of signatures to ignore. +# # If this set, it will override the default set of ignored signatures. +# # See https://github.com/tomarrell/wrapcheck#configuration for more information. +# ignoreSigs: +# - .Errorf( +# - errors.New( +# - errors.Unwrap( +# - .Wrap( +# - .Wrapf( +# - .WithMessage( +# +# wsl: +# # See https://github.com/bombsimon/wsl/blob/master/doc/configuration.md for +# # documentation of available settings. These are the defaults for +# # `golangci-lint`. +# allow-assign-and-anything: false +# allow-assign-and-call: true +# allow-cuddle-declarations: false +# allow-multiline-assign: true +# allow-separated-leading-comment: false +# allow-trailing-comment: false +# force-case-trailing-whitespace: 0 +# force-err-cuddling: false +# force-short-decl-cuddling: false +# strict-append: true +# +# # The custom section can be used to define linter plugins to be loaded at runtime. +# # See README doc for more info. +# custom: +# # Each custom linter should have a unique name. +# example: +# # The path to the plugin *.so. Can be absolute or local. Required for each custom linter +# path: /path/to/example.so +# # The description of the linter. Optional, just for documentation purposes. +# description: This is an example usage of a plugin linter. +# # Intended to point to the repo location of the linter. Optional, just for documentation purposes. +# original-url: github.com/golangci/example-linter + +linters: + # please, do not use `enable-all`: it's deprecated and will be removed soon. 
+ # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint + disable-all: true + enable: + - bodyclose +# - deadcode +# - depguard +# - dogsled +# - dupl ** + - errcheck +# - exportloopref +# - exhaustive +# - funlen ** +# - gochecknoinits +# - goconst + - gocritic + - gocyclo + - gofmt + - goimports +# - golint +# - gomnd ** +# - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - lll +# - maligned + - misspell +# - nakedret +# - noctx + - nolintlint +# - rowserrcheck + - revive + - staticcheck +# - structcheck + - stylecheck +# - typecheck + - unconvert + - unparam + - unused +# - varcheck +# - whitespace + + # don't enable: + # - asciicheck + # - scopelint + # - gochecknoglobals ** + # - gocognit + # - godot + # - godox ** + # - goerr113 * + # - nestif + # - prealloc + # - testpackage + # - wsl + +issues: + # Excluding configuration per-path, per-linter, per-text and per-source + exclude-rules: + - path: _test\.go + linters: + - gomnd + # Exclude lll issues for long lines with go:generate + - linters: + - lll + source: "^//go:generate " + + # Independently from option `exclude` we use default exclude patterns, + # it can be disabled by this option. To list all + # excluded by default patterns execute `golangci-lint run --help`. + # Default value for this option is true. + exclude-use-default: false + + # The default value is false. If set to true exclude and exclude-rules + # regular expressions become case sensitive. + exclude-case-sensitive: false + + # Which dirs to exclude: issues from them won't be reported. + # Can use regexp here: `generated.*`, regexp is applied on full path, + # including the path prefix if one is set. + # Default dirs are skipped independently of this option's value (see exclude-dirs-use-default). + # "/" will be replaced by current OS file path separator to properly work on Windows. + # Default: [] + exclude-dirs: + - docs + - mocks + + # The list of ids of default excludes to include or disable. By default it's empty. +# include: +# - EXC0002 # disable excluding of issues about comments from golint + + # Maximum issues count per one linter. Set to 0 to disable. Default is 50. + max-issues-per-linter: 50 + + # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. + max-same-issues: 3 + + # Show only new issues: if there are unstaged changes or untracked files, + # only those changes are analyzed, else only changes in HEAD~ are analyzed. + # It's a super-useful option for integration of golangci-lint into existing + # large codebase. It's not practical to fix all existing issues at the moment + # of integration: much better don't allow issues in new code. + # Default is false. + new: false + + # Show only new issues created after git revision `REV` + #new-from-rev: REV + + # Show only new issues created in git patch with set file path. + #new-from-patch: path/to/patch/file + + # Fix found issues (if it's supported by the linter) + #fix: true + +severity: + # Default value is empty string. + # Set the default severity for issues. If severity rules are defined and the issues + # do not match or no severity is provided to the rule this will be the default + # severity applied. Severities should match the supported severity names of the + # selected out format. 
+  # - Code climate: https://docs.codeclimate.com/docs/issues#issue-severity
+  # - Checkstyle: https://checkstyle.sourceforge.io/property_types.html#severity
+  # - Github: https://help.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-error-message
+  #default-severity: error
+
+  # The default value is false.
+  # If set to true, severity-rules regular expressions become case-sensitive.
+  case-sensitive: false
+
+  # Default value is empty list.
+  # When a list of severity rules is provided, severity information will be added to lint
+  # issues. Severity rules have the same filtering capability as exclude rules except you
+  # are allowed to specify one matcher per severity rule.
+  # Only affects out formats that support setting severity information.
+#  rules:
+#    - linters:
+#      - dupl
+#      severity: info
+
diff --git a/vendor/github.com/c2fo/vfs/v6/.gvmrc b/vendor/github.com/c2fo/vfs/v6/.gvmrc
new file mode 100644
index 000000000..4dcbbf963
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/.gvmrc
@@ -0,0 +1 @@
+go1.23.0
diff --git a/vendor/github.com/c2fo/vfs/v6/.mockery.yaml b/vendor/github.com/c2fo/vfs/v6/.mockery.yaml
new file mode 100644
index 000000000..481e54846
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/.mockery.yaml
@@ -0,0 +1,15 @@
+dir: "{{.InterfaceDir}}/mocks"
+filename: "{{.InterfaceName}}.go"
+mockname: "{{.InterfaceName}}"
+outpkg: mocks
+with-expecter: true
+packages:
+  github.com/c2fo/vfs/v6:
+    config:
+      all: true
+  github.com/c2fo/vfs/v6/backend/s3:
+    config:
+      all: true
+  github.com/c2fo/vfs/v6/backend/sftp:
+    config:
+      all: true
diff --git a/vendor/github.com/c2fo/vfs/v6/CHANGELOG.md b/vendor/github.com/c2fo/vfs/v6/CHANGELOG.md
new file mode 100644
index 000000000..74af6d481
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/CHANGELOG.md
@@ -0,0 +1,396 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+## [6.19.0] - 2024-09-13
+### Added
+- Add ability to set file permissions after writing an SFTP file. Resolves #202.
+
+## [6.18.0] - 2024-09-12
+### Added
+- Updated mocks to use mockery Expecter. Resolves #200.
+
+## [6.17.0] - 2024-09-10
+### Added
+- Added additionally-supported HostKeyAlgorithms for SFTP backend. Resolves #198.
+
+## [6.16.0] - 2024-08-24
+### Security
+- Fixes #196 - Update to latest Go and dependencies
+  - Update .gvmrc to 1.23.0.
+  - Update go.mod min version to Go 1.23.
+  - Update actions to test only supported Go versions (1.22, 1.23).
+  - Updated dependencies.
+  - Update golangci-lint.yml for latest golang-ci-lint version.
+
+## [6.15.1] - 2024-06-18
+### Added
+- Fixed #193 - Correct method signature for EncodeAuthority to not return an error.
+- Updated dependencies.
+
+## [6.15.0] - 2024-06-18
+### Added
+- Fixed #191 - Added util functions to encode authority strictly following RFC 3986.
+
+## [6.14.3] - 2024-06-11
+### Fixed
+- Fixed #189 - Update utils authority package to handle proper encoding/decoding of URIs with reserved characters.
+
+## [6.14.2] - 2024-05-30
+### Fixed
+- Fixed #187 - Update to latest jlaffaye/ftp library to fix an issue where FTPS connections were failing due to a bug in the library. Also updated dataconn to continue even if it fails to MakeDir.
+
+
+## [6.14.1] - 2024-05-28
+### Fixed
+- Fixed #185 - location.Exists was checking if a list entry was a directory, but it was only checking the first entry.
+
+## [6.14.0] - 2024-05-15
+### Security
+- updated dependencies
+
+## [6.13.2] - 2024-05-15
+### Fixed
+- Fixed #182 - location.Exists was checking the wrong dir for existence. Now it checks the file's parent dir for the subdir's existence.
+- Corrected a case where file.Close() occurs and then file.Location().NewFile("somefile.txt") was not respecting the connection reset. Now it does.
+
+## [6.13.1] - 2024-04-24
+### Fixed
+- Fixed #180 - SFTP backend now only opens files for W (instead of RW) except after read/seek. This should fix issues where server permissions are write-only.
+
+## [6.13.0] - 2024-03-09
+### Fixed
+- Fixed #177 - SFTP MoveToFile fails if destination file already exists
+### Added
+- Updated all backends to use error wrapping for Read, Write, Close, Seek. #174
+- Update GS backend to comply with new io integration test suite. #172
+
+## [6.12.1] - 2024-02-23
+### Fixed
+- Fixed #170 - Fix bug where mem backend doesn't remove fs entries when location.DeleteFile is called
+
+## [6.12.0] - 2024-02-23
+### Added
+- Fixed #167 - Update CopyToFile to defer file close so cursor is reset.
+- Fixes #146 - Update S3 backend to comply with new io integration test suite. Update to no longer write to memory.
+- Fixes #156 - Update os backend to comply with new io integration test suite
+- Fixes #160 - Update mem backend to comply with new io integration test suite
+### Security
+- Fixes #165 - Update to latest go and dependencies
+  - Update .gvmrc to 1.22.0.
+  - Update go.mod min version to Go 1.22.
+  - Update actions to test only supported Go versions (1.21, 1.22)
+  - Updated dependencies (ie, Google Cloud, Azure, AWS sdks)
+
+## [6.11.4] - 2024-03-08
+### Fixed
+- Fixed #177 - SFTP MoveToFile fails if destination file already exists
+
+## [6.11.3] - 2024-02-02
+### Fixed
+- Fixed #158 bug. Updated sftp backend to fix issue where some servers return a generic error message when a file is opened for RW on Read().
+
+## [6.11.2] - 2024-01-30
+### Fixed
+- Fixed #154 bug. Updated sftp backend to overwrite except after read/seek.
+
+## [6.11.1] - 2024-01-22
+### Fixed
+- Fixed #152 bug where s3 backend failed to read empty files
+
+## [6.11.0] - 2024-01-22
+### Added
+- Added support for hmac-sha1 and hmac-sha1-96 and removed hmac-ripemd160
+
+## [6.10.0] - 2023-12-28
+### Fixed
+- fixed #145 - Remove use of local temp file when reading/seeking from s3 files. This should improve performance by allowing streaming reads from s3 files.
+### Added
+- fixes #149 - Add overrideable default HostKeyAlgorithms, Ciphers, MACs, and KeyExchanges
+
+## [6.9.1] - 2023-11-21
+### Fixed
+- fixed sftp backend to consider configured ports other than 22
+
+## [6.9.0] - 2023-09-27
+
+### Security
+- Update .gvmrc to 1.21.1. Fixes #143.
+- Update go.mod min version to Go 1.20.
+- Update actions to test only supported Go versions (1.20, 1.21)
+- Updated dependencies (ie, Google Cloud, Azure, AWS sdks)
+
+## [6.8.0] - 2023-07-29
+### Added
+- ftp backend to support connection via FTP, FTPS (implicit TLS), and FTPES (explicit TLS).
+
+## [6.7.0] - 2023-05-13
+
+### Security
+- Update go.mod min version to Go 1.18. Fixes #139.
+- Update actions to test only supported Go versions (1.19, 1.20)
+- Updated dependencies (ie, Google Cloud, Azure, AWS sdks)
+
+## [6.6.0] - 2022-10-25
+### Fixed
+- fixed #129(cdd4143) Resolve failing golangci-lint - elliotwutingfeng
+- fixed #133 Replaced usage of deprecated ioutil functions with os and io equivalents.
+
+### Security
+- Update go.mod min version to Go 1.18. Fixes #131.
+- Update actions to test only supported Go versions (1.18, 1.19)
+- Updated dependencies (ie, Google Cloud, Azure, AWS sdks)
+
+## [6.5.2] - 2022-07-08
+### Fixed
+- fixed edge cases where in-memory files either weren't found when they should be, or didn't reset state on new writes
+- Replaced pre-generated ssh keypair in sftp options tests with auto-generated pair to remediate false-positive security alerts for hard-coded credentials.
+
+## [6.5.1] - 2022-07-08
+
+### Fixed
+- Fixed the azure deleteAllVersions to fetch versions using file path instead of location path
+
+## [6.5.0] - 2022-07-08
+
+### Added
+- Add DeleteAllVersions option support for Azure to remove all versions of a file
+
+### Fixed
+- Fixed golangci-lint issues
+- Fixed #90 - integration tests were failing on move for mem-to-mem move and bad mutex unlock call when doing mem to non-mem move
+
+## [6.4.0] - 2022-06-27
+
+### Added
+- Add DeleteAllVersions option support for S3 to remove all versions of a file
+- Add DeleteAllVersions option support for GS to remove all versions of a file
+
+### Security
+- Update dependencies
+- Update .gvmrc
+
+## [6.3.0] - 2022-05-16
+### Security
+- Update go.mod min version to Go 1.17
+- Update actions/setup-go action to v3
+- Update actions/checkout action to v3
+- Update golangci/golangci-lint-action action to v3
+- Update actions to test only supported Go versions (1.17, 1.18)
+- Updated dependencies (ie, Google Cloud, Azure, AWS sdks)
+
+## [6.2.0] - 2022-04-28
+### Added
+- Add S3 option ForcePathStyle. Fixes #96
+- Add S3 option DisableServerSideEncryption. Fixes #98
+
+## [6.1.0] - 2022-02-18
+### Added
+- Added utils.PathToURI() func to change path to OS URI. resolves #112
+
+## [6.0.2] - 2021-12-03
+### Fixed
+- fixed linting issues with missing godoc on exported functions and new build tag formatting.
+- fixed #92 (broken by #72) where calling ListByPrefix() was failing from non-root locations when calling file-level prefixes.
+- fixed azure helper func for vfssimple, ensuring it works on File URIs in addition to Location URIs
+- fixed #97 by updating vfssimple logic to ensure the most specific registered backend that matches a url is used, not just the first one it comes across. Updated vfssimple docs.
+- Added vfssimple tests. Zero to 100% coverage.
+- Fix codecov validation which got lost when converting to github actions. Removed .travis.yml.
+
+## [6.0.1] - 2021-11-07
+### Fixed
+- fixed #103 Ensures that sftp backend closes underlying SSH connection on autodisconnect, not just SFTP subsystem.
+
+## [6.0.0] - 2021-09-29
+### Changed
+- Modified sftp Key Exchange option to accept an array instead of a string, allowing multiple kex algorithms
+
+### Upgrade steps
+
+With v6.0.0, sftp.Options struct changed to accept an array of Key Exchange algorithms rather than a string. To update, change the syntax of the auth commands.
+```
+"keyExchanges":"diffie-hellman-group-a256"
+```
+becomes
+```
+"keyExchanges":["diffie-hellman-group-a256"]
+```
+
+## [5.10.0] - 2021-09-16
+### Changed
+- Modified S3 file.go so that on the initial read when the remote file is downloaded, a temporary file is locally created
+using concurrent goroutines to download parts of the file vs. a single request to download the whole object
+### Fixed
+- Fixed #100 Rolled back Seek validation before Copy or Move in SFTP backend due to bug on some SFTP servers and how we cache open "file handles"
+
+## [5.9.0] - 2021-09-07
+### Added
+- Add support (and tests) for Go 1.17. Updated dependencies.
+- Updated golangci.yml config.
+### Fixed
+- fixed broken backend integration test.
+
+## [5.8.0] - 2021-08-24
+### Fixed
+- fixed #82 Return error when CopyTo/MoveTo functions are called when Seek offset is not (0,0) for all backends,
+not just GCS.
+- fixed #84 where sftp connections were never disconnected in a long-lived app. Added Close to sftp backend Client interface. The client is closed automatically 10 seconds (configurable in Options) after connecting, unless reset by a subsequent server request action.
+### Deprecated
+- Deprecating utils.TouchCopy (#89).
+### Added
+- Added utils.TouchCopyBuffered (#89). TouchCopyBuffered uses a min buffer size of 256MB and allows for a
+  custom buffer size to be set with filesystem options. Improves large file transfer with negligible impact on smaller file transfers.
+
+## [5.7.0] - 2021-07-23
+### Added
+- Add support of keyexchanges algorithm as a sftp option
+
+## [5.6.0] - 2021-07-19
+### Fixed
+- fixed #71 Listing a Google Cloud Storage bucket does not return things in the root of the bucket
+- fixed #78 Return error when CopyTo/MoveTo functions are called when Seek offset is not (0,0) for GS
+### Performance
+- fixed #74 utils.TouchCopy() unnecessarily calls file.Size()
+
+## [5.5.7] - 2021-05-12
+### Fixed
+- fixed: do not offset number of bytes read for memfs (#70) Jeffrey Larson
+- fixed S3 VFS backend throws an error when you open then close a file. shivanigaurh-c2fo
+- fixed minor markdown doc formatting of type headers
+### Added
+- Add Azure Blob Storage backend. Dusty McIlvain
+- Add github actions for testing and linting (golangci-lint)
+
+## [5.5.6] - 2021-02-07
+### Fixed
+- fixed failing test due to CI asking for mod tidy.
+- fixed ineffectual assignment failures keeping vfs from achieving 100% in goreportcard.
+- fixed #48 bug where 'invalid cross-device link' error occurs when renaming os files that are found on different devices/volumes including temp dirs.
+- fixed #53 bug where s3 file tests were failing due to a deferred removal of a temp file.
+
+### Security
+- Upgraded dependencies. Required regen of S3 API mock due to a couple new methods.
+  SSH returns a different error text since Go 1.14 (only affects 1 test).
+
+## [5.5.5] - 2020-12-11
+### Fixed
+- fixes #49 - Update sftp.Options struct tags
+
+## [5.5.4] - 2020-07-06
+### Fixed
+- fixes #21 - OS file.Location().ChangeDir(something/) should not affect location
+- fixes #44 - make S3 waitUntilFileExists Exists error more descriptive of root error
+- fixes #45 - fix ASCII diagram alignment for SFTP doc
+### Added
+- Improved test coverage for os.FileSystem
+
+## [5.5.3] - 2020-05-11
+### Fixed
+- Addresses an issue where writes/touch calls on the vfs.File backend did not work properly on AWS-hosted SFTP environments. (See https://github.com/pkg/sftp/pull/310/files)
+
+## [5.5.2] - 2020-04-23
+### Fixed
+- Ensure that writing truncates existing file. Fixes #40
+
+## [5.5.1] - 2020-02-20
+### Fixed
+- Ensure that spaces (url encoded or not) in filenames and paths work as expected when copying (particularly for s3 to s3 native copyObject). Fixes #36.
+
+## [5.5.0] - 2019-09-09
+### Added
+- SFTP backend vfs implementation
+
+## [5.4.0] - 2019-08-24
+### Added
+- Added fallback to TouchCopy for S3/GCS implementations for CopyTo and MoveTo operations between files and locations which use different authentication and region configurations. When possible, the directory copy/move operations will be used by the underlying clients.
+
+## [5.3.0] - 2019-08-24
+### Added
+- ACL can now be passed in as an s3.Option value. See
+https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl for values.
+
+## [5.2.3] - 2019-08-07
+### Fixed
+- The GS implementation of location.List() returned an empty string for files found inside a persistent "folder" object
+created in the UI or by creating a zero-length object ending with "/". These objects are now ignored.
+- Fixed a gs-to-gs copy bug that became visible with integration tests added in v5.2.2.
+
+## [5.2.2] - 2019-07-17
+### Fixed
+- The OS implementation of file.MoveToLocation() now ensures that the target directory exists before attempting to move the file
+
+## [5.2.1] - 2019-07-17
+### Fixed
+- Missing error check in utils.UpdateLastModifiedByMoving()
+- Minor goreportcard checks failed. Now 100%
+
+## [5.2.0] - 2019-07-16
+### Added
+- In-Memory backend vfs implementation
+- Included the in-memory backend in the list of registered backends used in backend_integration_test
+- Checking for existence at the start of various File functions (size, seek, etc) and returning an error if the file does not exist on the OS backend
+- Tests in backend_integration_test that ensure that operations done on non-existent files throw an error
+- mem.md in vfs/docs, updated link to it in the README.md
+### Fixed
+- Relative path validation in utils.go now disallows empty names
+### Changed
+- utils_test.go now expects an empty string NOT to validate
+- updated README to include "Touch()" definition under the File interface rather than the Location interface
+- updated README to exclude "in-memory-backend" from the list of ideas
+
+
+## [5.1.0] - 2019-07-08
+### Added
+- Added Touch() method to File interface and implemented in each backend.
+- Increased test coverage in both os and s3 backends.
+
+## [5.0.0] - 2019-06-18
+### Changed
+- Defined stricter definitions in interfaces, largely related to input requirements being more
+URI compliant.
+- FileSystem's NewFile and NewLocation required absolute paths. Location's NewFile and NewLocation require relative paths.
+- Added/Updated several new utils functions for URI validation across implementations.
+- Ensure native methods are being used for atomic functions like copy or move/rename where possible
+- Document expected behavior with empty readers in all copy and move functions as well as io.Copy
+- ListByPrefix now allows relative paths.
+### Added
+- Integration test suite for backend developers that ensures expected interface behaviors and tests interactions
+between implementations.
+- Examples of io.Copy usage in docs.
+### Fixed
+- Fixed [#10](https://github.com/C2FO/vfs/issues/10)
+- Fixed vfssimple bug that would return os file system if the uri had the word 'file' anywhere in it, even if it was s3, like s3://bucket/path/to/file.txt
+- Fixed bug where MoveToLocation was returning the old file with updated location rather
+than a brand new file with new location. Since File may represent a non-existent file, the original file should still be valid.
+
+## [4.0.0] - 2019-05-22
+
+### Release Notes
+
+1. Added the Retry() method to the FileSystem interface, along with a no-op default retryer.
+2. Integrated the newly defined retryer interface into the GCS implementation of VFS and now allows the retry method to be injected as a FileSystem option.
+3. Integrated the request.Retryer from the s3 client as an injectable implementation for the S3 vfs.Option.
+
+### Upgrade steps
+
+With v4.0.0, we introduced the ability to add a 'retry' option to each registered file system. This version bump will require an update to any existing FileSystem implementations. If a custom retryer is not required by the implemented file system, the default retryer can be returned.
+
+```
+func (fs *FileSystem) Retry() vfs.Retry {
+    return vfs.DefaultRetryer()
+}
+```
+
+## [3.0.0] - 2019-04-22
+
+Initial release for go modules compliant project.
+
+## [2.1.4] - 2019-04-05
+### Fixed
+- Remove go modules from v2 since we don't prefix path with `/v2`.
+  See github.com/c2fo/vfs/issues/14.
diff --git a/vendor/github.com/c2fo/vfs/v6/CODE_OF_CONDUCT.md b/vendor/github.com/c2fo/vfs/v6/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..24632a02d
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/CODE_OF_CONDUCT.md
@@ -0,0 +1,128 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +john.judd@c2fo.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. diff --git a/vendor/github.com/c2fo/vfs/v6/License.md b/vendor/github.com/c2fo/vfs/v6/License.md new file mode 100644 index 000000000..e584b88e7 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/License.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2019-2021 C2FO, Inc + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/c2fo/vfs/v6/README.md b/vendor/github.com/c2fo/vfs/v6/README.md
new file mode 100644
index 000000000..b9b23c3d3
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/README.md
@@ -0,0 +1,494 @@
+# vfs
+
+---
+
+[![GitHub tag](https://img.shields.io/github/tag/c2fo/vfs.svg?style=flat)](https://github.com/c2fo/vfs/releases)
+[![Build Status](https://travis-ci.org/C2FO/vfs.svg?branch=master)](https://travis-ci.org/C2FO/vfs)
+[![GoDoc](https://pkg.go.dev/badge/github.com/c2fo/vfs/v6?utm_source=godoc)](https://pkg.go.dev/github.com/c2fo/vfs/v6)
+[![codecov](https://codecov.io/gh/c2fo/vfs/branch/master/graph/badge.svg)](https://codecov.io/gh/c2fo/vfs)
+[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](License.md)
+[![Go Report Card](https://goreportcard.com/badge/github.com/c2fo/vfs)](https://goreportcard.com/report/github.com/c2fo/vfs)
+[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go)
+
+Package vfs provides a pluggable, extensible, and opinionated set of file system
+functionality for Go across a number of file system types such as os, Azure, S3, GCS,
+and SFTP.
+
+
+### Philosophy
+
+When building our platform, we initially wrote a library that was something to
+the effect of
+
+```go
+    if config.DISK == "S3" {
+        // do some s3 file system operation
+    } else if config.DISK == "mock" {
+        // fake something
+    } else {
+        // do some native os.xxx operation
+    }
+```
+
+Not only was this ugly, but because the behaviors of each "file system" were
+different, we had to constantly alter the file locations and pass around a bucket string (even
+if the file system didn't know what a bucket was).
+
+We found a handful of third-party libraries that were interesting but none of
+them had everything we needed/wanted. Of particular inspiration was
+https://github.com/spf13/afero in its composition of the super-powerful stdlib
+[io.*](https://godoc.org/io) interfaces. Unfortunately, it didn't support Google Cloud Storage and there
+was still a lot of passing around of strings and structs. Few, if any, of the
+vfs-like libraries provided interfaces to easily and confidently create new
+file system backends.
+
+###### What we needed/wanted was the following (and more):
+
+* self-contained set of structs that could be passed around like a file/dir handle
+* the struct would represent an existing or nonexistent file/dir
+* provide common (and only common) functionality across all file systems so that after initialization, we don't care
+  what the underlying file system is and can therefore write our code agnostically/portably
+* use [io.*](https://godoc.org/io) interfaces such as [io.Reader](https://godoc.org/io#Reader) and [io.Writer](https://godoc.org/io#Writer) without needing to call a separate function
+* extensibility to easily add other needed file systems like Microsoft Azure Cloud File Storage
+* prefer native atomic functions when possible (ie S3 to S3 moving would use the native move api call rather than
+  copy-delete)
+* a uniform way of addressing files regardless of file system. This is why we use complete URIs in vfssimple
+* [fmt.Stringer](https://godoc.org/fmt#Stringer) interface so that the file struct passed to a log message (or other Stringer use) would show the URI
+* mockable file system
+* pluggability so that third-party implementations of our interfaces could be used
+
+
+### Install
+
+Pre 1.17:
+```
+go get -u github.com/c2fo/vfs/v6
+```
+
+Post 1.17:
+```
+go install github.com/c2fo/vfs/v6
+```
+
+### Upgrading
+
+#### Upgrading from v5 to v6
+With v6.0.0, sftp.Options struct changed to accept an array of Key Exchange algorithms rather than a string. To update, change the syntax of the auth commands.
+```
+    "keyExchanges":"diffie-hellman-group-a256"
+```
+becomes
+```
+    "keyExchanges":["diffie-hellman-group-a256"]
+```
+
+### Usage
+
+We provide [vfssimple](docs/vfssimple.md) as a basic way of initializing file system backends (see each
+implementation's docs about authentication). [vfssimple](docs/vfssimple.md) pulls in every c2fo/vfs
+backend. If you need to reduce the backend requirements (and app memory
+footprint) or add a third-party backend, you'll need to implement your own
+"factory". See the [backend](docs/backend.md) doc for more info.
+
+You can then use those file systems to initialize locations which you'll be
+referencing frequently, or initialize files directly:
+
+```go
+    osFile, err := vfssimple.NewFile("file:///path/to/file.txt")
+    s3File, err := vfssimple.NewFile("s3://bucket/prefix/file.txt")
+
+    osLocation, err := vfssimple.NewLocation("file:///tmp/")
+    s3Location, err := vfssimple.NewLocation("s3://bucket/")
+
+    osTmpFile, err := osLocation.NewFile("anotherFile.txt") // file at /tmp/anotherFile.txt
+```
+
+You can perform a number of actions without any consideration for the system's api or implementation details.
+
+```go
+    osFileExists, err := osFile.Exists() // true, nil
+    s3FileExists, err := s3File.Exists() // false, nil
+    err = osFile.CopyToFile(s3File) // nil
+    s3FileExists, err = s3File.Exists() // true, nil
+
+    movedOsFile, err := osFile.MoveToLocation(osLocation)
+    osFileExists, err = osFile.Exists() // false, nil (move actions delete the original file)
+    movedOsFileExists, err := movedOsFile.Exists() // true, nil
+
+    s3FileUri := s3File.URI() // s3://bucket/prefix/file.txt
+    s3FileName := s3File.Name() // file.txt
+    s3FilePath := s3File.Path() // /prefix/file.txt
+```
+
+File's [io.*](https://godoc.org/io) interfaces may be used directly:
+
+```go
+    reader := strings.NewReader("Clear is better than clever")
+    gsFile, err := vfssimple.NewFile("gs://somebucket/path/to/file.txt")
+
+    byteCount, err := io.Copy(gsFile, reader)
+    err = gsFile.Close()
+```
+
+Note: [io.Copy()](https://godoc.org/io#Copy) doesn't strictly define what happens if a reader is empty. This is complicated because io.Copy
+will first delegate the actual copying as follows:
+  1. if the io.Reader also implements io.WriterTo, WriteTo() will do the copy
+  2. if the io.Writer also implements io.ReaderFrom, ReadFrom() will do the copy
+  3. finally, if neither 1 nor 2, io.Copy will do its own buffered copy
+
+In case 3, and most implementations of cases 1 and 2, if the reader is empty, Write() never gets called. What that means for
+vfs is there is no way for us to ensure that an empty file does or doesn't get written on an io.Copy(). For instance,
+OS always creates a file regardless of whether Write() is called, whereas S3 must Write() and Close().
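+
+For example, here is a minimal sketch (the `file://` and `s3://` URIs are hypothetical, and vfssimple is assumed to be
+initialized as in the examples above) that sidesteps the empty-reader ambiguity by relying on the vfs-native copy
+described next rather than io.Copy():
+
+```go
+    src, _ := vfssimple.NewFile("file:///tmp/empty.txt") // hypothetical zero-length source
+    dst, _ := vfssimple.NewFile("s3://bucket/empty.txt") // hypothetical target
+
+    // io.Copy(dst, src) might never call dst.Write() when src is empty, so whether
+    // an empty object gets created is backend-dependent. CopyToFile, by contrast,
+    // guarantees dst exists afterward (and closes both files):
+    if err := src.CopyToFile(dst); err != nil {
+        // handle error
+    }
+```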
+
+As such, vfs cannot guarantee copy behavior except in our own CopyToFile, MoveToFile, CopyToLocation, and MoveToLocation
+functions. If you need to ensure a file gets copied/moved with io.Copy(), you must verify it yourself OR use vfs's [utils.TouchCopy](docs/utils.md).
+
+
+### Third-party Backends
+
+  * none so far
+
+Feel free to send a pull request if you want to add your backend to the list.
+
+### See also:
+* [vfscp](docs/vfscp.md)
+* [vfssimple](docs/vfssimple.md)
+* [backend](docs/backend.md)
+  * [os backend](docs/os.md)
+  * [gs backend](docs/gs.md)
+  * [s3 backend](docs/s3.md)
+  * [in-memory backend](docs/mem.md)
+  * [sftp backend](docs/sftp.md)
+  * [ftp backend](docs/ftp.md)
+  * [azure backend](docs/azure.md)
+* [utils](docs/utils.md)
+
+### Ideas
+
+Things to add:
+* Provide better List() functionality with more abstracted filtering and paging (iterator?). Return File structs vs URIs?
+* Add better/any context.Context() support
+
+
+### Contributors
+
+Brought to you by the Enterprise Pipeline team at C2FO:
+
+* John Judd - john.judd@c2fo.com
+
+* Dustin Schnee - dustin.schnee@c2fo.com
+
+* Katie Hrenchir Shields - katie.shields@c2fo.com
+
+* Grant Higgins - grant.higgins@c2fo.com
+
+* Pooja Dhondge - pooja.dhondge@c2fo.com
+
+* Chris Roush - chris.roush@c2fo.com
+
+* Anthony Ross - anthony.ross@c2fo.com
+
+https://github.com/c2fo/
+
+### Contributing
+
+  1. Fork it ()
+  2. Create your feature branch (`git checkout -b feature/fooBar`)
+  3. Commit your changes (`git commit -am 'Add some fooBar'`)
+  4. Push to the branch (`git push origin feature/fooBar`)
+  5. Create a new Pull Request
+
+
+### License
+
+Distributed under the MIT license. See [License.md](License.md) for more information.
+
+
+### Definitions
+
+###### absolute path
+- A path is said to be absolute if it provides the entire context
+needed to find a file, including the file system root. An absolute path must
+begin with a slash and may include . and .. directories.
+
+###### file path
+- A file path ends with a filename and therefore may not end with a slash. It may be relative or absolute.
+
+###### location path
+- A location/directory path must end with a slash. It may be relative or absolute.
+
+###### relative path
+- A relative path is a way to locate a directory or file relative to
+another directory. A relative path may not begin with a slash but may include .
+and .. directories.
+
+###### URI
+- A Uniform Resource Identifier (URI) is a string of characters that
+unambiguously identifies a particular resource. To guarantee uniformity, all
+URIs follow a predefined set of syntax rules, but also maintain extensibility
+through a separately defined hierarchical naming scheme (e.g. http://).
+
+## Interfaces
+
+#### type File
+
+```go
+type File interface {
+    io.Closer
+    io.Reader
+    io.Seeker
+    io.Writer
+    fmt.Stringer
+
+    // Exists returns boolean if the file exists on the file system. Returns an error, if any.
+    Exists() (bool, error)
+
+    // Location returns the vfs.Location for the File.
+    Location() Location
+
+    // CopyToLocation will copy the current file to the provided location.
+    //
+    //   * Upon success, a vfs.File, representing the file at the new location, will be returned.
+    //   * In the case of an error, nil is returned for the file.
+    //   * CopyToLocation should use native functions when possible within the same scheme.
+    //   * If the file already exists at the location, the contents will be overwritten with the current file's contents.
+    //   * CopyToLocation will Close both the source and target Files which therefore can't be appended to without first
+    //     calling Seek() to move the cursor to the end of the file.
+    CopyToLocation(location Location) (File, error)
+
+    // CopyToFile will copy the current file to the provided file instance.
+    //
+    //   * In the case of an error, nil is returned for the file.
+    //   * CopyToFile should use native functions when possible within the same scheme.
+    //   * If the file already exists, the contents will be overwritten with the current file's contents.
+    //   * CopyToFile will Close both the source and target Files which therefore can't be appended to without first
+    //     calling Seek() to move the cursor to the end of the file.
+    CopyToFile(file File) error
+
+    // MoveToLocation will move the current file to the provided location.
+    //
+    //   * If the file already exists at the location, the contents will be overwritten with the current file's contents.
+    //   * If the location does not exist, an attempt will be made to create it.
+    //   * Upon success, a vfs.File, representing the file at the new location, will be returned.
+    //   * In the case of an error, nil is returned for the file.
+    //   * When moving within the same Scheme, native move/rename should be used where possible.
+    //   * MoveToLocation will Close both the source and target Files which therefore can't be appended to without first
+    //     calling Seek() to move the cursor to the end of the file.
+    MoveToLocation(location Location) (File, error)
+
+    // MoveToFile will move the current file to the provided file instance.
+    //
+    //   * If the file already exists, the contents will be overwritten with the current file's contents.
+    //   * The current instance of the file will be removed.
+    //   * MoveToFile will Close both the source and target Files which therefore can't be appended to without first
+    //     calling Seek() to move the cursor to the end of the file.
+    MoveToFile(file File) error
+
+    // Delete unlinks the File on the file system.
+    Delete() error
+
+    // LastModified returns the timestamp the file was last modified (as *time.Time).
+    LastModified() (*time.Time, error)
+
+    // Size returns the size of the file in bytes.
+    Size() (uint64, error)
+
+    // Path returns absolute path, including filename, ie /some/path/to/file.txt
+    //
+    // If the directory portion of a file is desired, call
+    //   someFile.Location().Path()
+    Path() string
+
+    // Name returns the base name of the file path.
+    //
+    // For file:///some/path/to/file.txt, it would return file.txt
+    Name() string
+
+    // Touch creates a zero-length file on the vfs.File if no File exists. Otherwise, it updates the File's
+    // last modified timestamp. Returns an error if unable to touch the File.
+    Touch() error
+
+    // URI returns the fully qualified absolute URI for the File. IE, s3://bucket/some/path/to/file.txt
+    URI() string
+}
+```
+
+File represents a file on a file system. A File may or may not actually exist on
+the file system.
+
+#### type FileSystem
+
+```go
+type FileSystem interface {
+    // NewFile initializes a File on the specified volume at path 'absFilePath'.
+    //
+    //   * Accepts volume and an absolute file path.
+    //   * Upon success, a vfs.File, representing the file's new path (location path + file relative path), will be returned.
+    //   * On error, nil is returned for the file.
+    //   * Note that not all file systems will have a "volume" and will therefore be "":
+    //       file:///path/to/file has a volume of "" and name /path/to/file
+    //     whereas
+    //       s3://mybucket/path/to/file has a volume of "mybucket" and name /path/to/file
+    //   * The file may or may not already exist.
+    NewFile(volume string, absFilePath string) (File, error)
+
+    // NewLocation initializes a Location on the specified volume with the given path.
+    //
+    //   * Accepts volume and an absolute location path.
+    //   * The file may or may not already exist. Note that on key-store file systems like S3 or GCS, paths never truly exist.
+    //   * On error, nil is returned for the location.
+    //
+    // See NewFile for note on volume.
+    NewLocation(volume string, absLocPath string) (Location, error)
+
+    // Name returns the name of the FileSystem ie: Amazon S3, os, Google Cloud Storage, etc.
+    Name() string
+
+    // Scheme returns the uri scheme used by the FileSystem: s3, file, gs, etc.
+    Scheme() string
+
+    // Retry will return the retry function to be used by any file system.
+    Retry() Retry
+}
+```
+
+FileSystem represents a file system with any authentication accounted for.
+
+#### type Location
+
+```go
+type Location interface {
+    // String returns the fully qualified absolute URI for the Location. IE, file://bucket/some/path/
+    fmt.Stringer
+
+    // List returns a slice of strings representing the base names of the files found at the Location.
+    //
+    //   * All implementations are expected to return ([]string{}, nil) in the case of a non-existent directory/prefix/location.
+    //   * If the user cares about the distinction between an empty location and a non-existent one, Location.Exists() should
+    //     be checked first.
+    List() ([]string, error)
+
+    // ListByPrefix returns a slice of strings representing the base names of the files found in Location whose filenames
+    // match the given prefix.
+    //
+    //   * All implementations are expected to return ([]string{}, nil) in the case of a non-existent directory/prefix/location.
+    //   * "relative" prefixes are allowed, ie, listByPrefix from "/some/path/" with prefix "to/somepattern" is the same as
+    //     location "/some/path/to/" with prefix of "somepattern"
+    //   * If the user cares about the distinction between an empty location and a non-existent one, Location.Exists() should
+    //     be checked first.
+    ListByPrefix(prefix string) ([]string, error)
+
+    // ListByRegex returns a slice of strings representing the base names of the files found in Location that matched the
+    // given regular expression.
+    //
+    //   * All implementations are expected to return ([]string{}, nil) in the case of a non-existent directory/prefix/location.
+    //   * If the user cares about the distinction between an empty location and a non-existent one, Location.Exists() should
+    //     be checked first.
+    ListByRegex(regex *regexp.Regexp) ([]string, error)
+
+    // Volume returns the volume as a string. In URI parlance, volume equates to authority.
+    // For example, for s3://mybucket/path/to/file.txt, Volume would return "mybucket".
+    //
+    // Note: Some file systems may not have a volume and will return "".
+    Volume() string
+
+    // Path returns absolute location path, ie /some/path/to/. An absolute path must be resolved to its shortest path:
+    // see path.Clean
+    Path() string
+
+    // Exists returns boolean if the location exists on the file system. Returns an error if any.
+    Exists() (bool, error)
+
+    // NewLocation is an initializer for a new Location relative to the existing one.
+    //
+    // Given location:
+    //     loc := fs.NewLocation("s3://mybucket/some/path/to/")
+    // calling:
+    //     newLoc := loc.NewLocation("../../")
+    // would return a new vfs.Location representing:
+    //     s3://mybucket/some/
+    //
+    //   * Accepts a relative location path.
+    NewLocation(relLocPath string) (Location, error)
+
+    // ChangeDir updates the existing Location's path to the provided relative location path.
+    //
+    // Given location:
+    //     loc := fs.NewLocation("file:///some/path/to/")
+    // calling:
+    //     loc.ChangeDir("../../")
+    // would update the current location instance to
+    // file:///some/.
+    //
+    //   * ChangeDir accepts a relative location path.
+    ChangeDir(relLocPath string) error
+
+    // FileSystem returns the underlying vfs.FileSystem for the Location.
+    FileSystem() FileSystem
+
+    // NewFile will instantiate a vfs.File instance at or relative to the current location's path.
+    //
+    //   * Accepts a relative file path.
+    //   * In the case of an error, nil is returned for the file.
+    //   * Resultant File path will be the shortest path name equivalent of combining the Location path and relative path, if any.
+    //     ie, /tmp/dir1/ as location and relFilePath "newdir/./../newerdir/file.txt"
+    //     results in /tmp/dir1/newerdir/file.txt for the final vfs.File path.
+    //   * Upon success, a vfs.File, representing the file's new path (location path + file relative path), will be returned.
+    //   * The file may or may not already exist.
+    NewFile(relFilePath string) (File, error)
+
+    // DeleteFile deletes the file of the given name at the location.
+    //
+    // This is meant to be a shortcut for instantiating a new file and calling delete on that, with all the necessary
+    // error handling overhead.
+    //
+    //   * Accepts relative file path.
+    DeleteFile(relFilePath string) error
+
+    // URI returns the fully qualified absolute URI for the Location. IE, s3://bucket/some/path/
+    //
+    // URIs for locations must always end with a slash.
+    URI() string
+}
+```
+
+Location represents a file system path which serves as a start point for
+directory-like functionality. A location may or may not actually exist on the
+file system.
+
+#### type Options
+
+```go
+type Options interface{}
+```
+
+Options are structs that contain various options specific to the file system.
+
+#### type Retry
+
+```go
+type Retry func(wrapped func() error) error
+```
+
+Retry is a function that can be used to wrap any operation into a definable
+retry operation. The wrapped argument is called by the underlying VFS
+implementation.
+
+Ex:
+```go
+    var retrier Retry = func(wrapped func() error) error {
+        var ret error
+        for i := 0; i < 5; i++ {
+            if err := wrapped(); err != nil {
+                ret = err
+                continue // retry on error
+            }
+            return nil // success
+        }
+        return ret
+    }
+```
+
+#### func DefaultRetryer
+
+```go
+func DefaultRetryer() Retry
+```
+DefaultRetryer returns a no-op retryer which simply calls the wrapped command
+without looping.
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/all/all.go b/vendor/github.com/c2fo/vfs/v6/backend/all/all.go
new file mode 100644
index 000000000..9fcacbf77
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/all/all.go
@@ -0,0 +1,12 @@
+// Package all imports all VFS implementations.
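+//
+// Each blank import below registers its backend with vfs's backend registry as
+// an init() side effect, so importing this one package makes every supported
+// scheme resolvable, e.g.:
+//
+//	import _ "github.com/c2fo/vfs/v6/backend/all"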
+package all
+
+import (
+    _ "github.com/c2fo/vfs/v6/backend/azure" // register azure backend
+    _ "github.com/c2fo/vfs/v6/backend/ftp"   // register ftp backend
+    _ "github.com/c2fo/vfs/v6/backend/gs"    // register gs backend
+    _ "github.com/c2fo/vfs/v6/backend/mem"   // register mem backend
+    _ "github.com/c2fo/vfs/v6/backend/os"    // register os backend
+    _ "github.com/c2fo/vfs/v6/backend/s3"    // register s3 backend
+    _ "github.com/c2fo/vfs/v6/backend/sftp"  // register sftp backend
+)
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/azure/client.go b/vendor/github.com/c2fo/vfs/v6/backend/azure/client.go
new file mode 100644
index 000000000..ae41f13e6
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/azure/client.go
@@ -0,0 +1,271 @@
+package azure
+
+import (
+    "context"
+    "fmt"
+    "io"
+    "net/url"
+    "strings"
+    "time"
+
+    "github.com/Azure/azure-pipeline-go/pipeline"
+    "github.com/Azure/azure-storage-blob-go/azblob"
+
+    "github.com/c2fo/vfs/v6"
+    "github.com/c2fo/vfs/v6/utils"
+)
+
+// The Client interface contains methods that perform specific operations against Azure Blob Storage. This interface is
+// here so we can write mocks over the actual functionality.
+type Client interface {
+    // Properties should return a BlobProperties struct for the blob specified by locationURI and filePath. If the
+    // blob is not found, an error should be returned.
+    Properties(locationURI, filePath string) (*BlobProperties, error)
+
+    // SetMetadata should add the metadata specified by the parameter metadata for the blob specified by the parameter
+    // file.
+    SetMetadata(file vfs.File, metadata map[string]string) error
+
+    // Upload should create or update the blob specified by the file parameter with the contents of the content
+    // parameter.
+    Upload(file vfs.File, content io.ReadSeeker) error
+
+    // Download should return a reader for the blob specified by the file parameter.
+    Download(file vfs.File) (io.ReadCloser, error)
+
+    // Copy should copy the file specified by srcFile to the file specified by tgtFile.
+    Copy(srcFile vfs.File, tgtFile vfs.File) error
+
+    // List should return a listing for the specified location. Listings should include the full path for the file.
+    List(l vfs.Location) ([]string, error)
+
+    // Delete should delete the file specified by the parameter file.
+    Delete(file vfs.File) error
+
+    // DeleteAllVersions should delete all versions of the file specified by the parameter file.
+    DeleteAllVersions(file vfs.File) error
+}
+
+// DefaultClient is the main implementation that actually makes the calls to Azure Blob Storage
+type DefaultClient struct {
+    pipeline pipeline.Pipeline
+}
+
+// NewClient initializes a new DefaultClient
+func NewClient(options *Options) (*DefaultClient, error) {
+    credential, err := options.Credential()
+    if err != nil {
+        return nil, err
+    }
+
+    // This configures the client to use the default retry policy. The default policy uses exponential backoff with
+    // maxRetries = 4. If this behavior needs to be changed, add the Retry member to azblob.PipelineOptions. For
+    // more information on azure retry policies see https://pkg.go.dev/github.com/Azure/azure-storage-blob-go/azblob#RetryOptions
+    //
+    // Example (this is not the default):
+    //   RetryOptions{
+    //       Policy:        RetryPolicyExponential, // Use exponential backoff as opposed to linear
+    //       MaxTries:      3,                      // Try at most 3 times to perform the operation (set to 1 to disable retries)
+    //       TryTimeout:    time.Second * 3,        // Maximum time allowed for any single try
+    //       RetryDelay:    time.Second * 1,        // Backoff amount for each retry (exponential or linear)
+    //       MaxRetryDelay: time.Second * 3,        // Max delay between retries
+    //   }
+    pl := azblob.NewPipeline(credential, azblob.PipelineOptions{})
+
+    return &DefaultClient{pl}, nil
+}
+
+// Properties fetches the properties for the blob specified by the parameters containerURI and filePath
+func (a *DefaultClient) Properties(containerURI, filePath string) (*BlobProperties, error) {
+    URL, err := url.Parse(containerURI)
+    if err != nil {
+        return nil, err
+    }
+    containerURL := azblob.NewContainerURL(*URL, a.pipeline)
+
+    if filePath == "" {
+        // this is only used to check for the existence of a container so we don't care about anything but the
+        // error
+        _, err := containerURL.GetProperties(context.Background(), azblob.LeaseAccessConditions{})
+        if err != nil {
+            return nil, err
+        }
+        return nil, nil
+    }
+
+    blobURL := containerURL.NewBlockBlobURL(utils.RemoveLeadingSlash(filePath))
+    resp, err := blobURL.GetProperties(context.Background(), azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
+    if err != nil {
+        return nil, err
+    }
+    return NewBlobProperties(resp), nil
+}
+
+// Upload uploads a new file to Azure Blob Storage
+func (a *DefaultClient) Upload(file vfs.File, content io.ReadSeeker) error {
+    URL, err := url.Parse(file.Location().(*Location).ContainerURL())
+    if err != nil {
+        return err
+    }
+
+    containerURL := azblob.NewContainerURL(*URL, a.pipeline)
+    blobURL := containerURL.NewBlockBlobURL(utils.RemoveLeadingSlash(file.Path()))
+    _, err = blobURL.Upload(context.Background(), content, azblob.BlobHTTPHeaders{}, azblob.Metadata{},
+        azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{})
+    return err
+}
+
+// SetMetadata sets the given metadata for the blob
+func (a *DefaultClient) SetMetadata(file vfs.File, metadata map[string]string) error {
+    URL, err := url.Parse(file.Location().(*Location).ContainerURL())
+    if err != nil {
+        return err
+    }
+
+    containerURL := azblob.NewContainerURL(*URL, a.pipeline)
+    blobURL := containerURL.NewBlockBlobURL(utils.RemoveLeadingSlash(file.Path()))
+    _, err = blobURL.SetMetadata(context.Background(), metadata, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
+    return err
+}
+
+// Download returns an io.ReadCloser for the given vfs.File
+func (a *DefaultClient) Download(file vfs.File) (io.ReadCloser, error) {
+    URL, err := url.Parse(file.Location().(*Location).ContainerURL())
+    if err != nil {
+        return nil, err
+    }
+
+    containerURL := azblob.NewContainerURL(*URL, a.pipeline)
+    blobURL := containerURL.NewBlockBlobURL(utils.RemoveLeadingSlash(file.Path()))
+    get, err := blobURL.Download(context.Background(), 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
+    if err != nil {
+        return nil, err
+    }
+    return get.Body(azblob.RetryReaderOptions{}), nil
+}
+
+// Copy copies srcFile to the destination tgtFile within Azure Blob Storage. Note that in the case where we get
+// encoded spaces in the file name (i.e. %20), the '%' must be encoded or the copy command will return a not-found
+// error.
+func (a *DefaultClient) Copy(srcFile, tgtFile vfs.File) error {
+    // Can't use url.PathEscape here since that will escape everything (even the directory separators)
+    srcURL, err := url.Parse(strings.Replace(srcFile.URI(), "%", "%25", -1))
+    if err != nil {
+        return err
+    }
+
+    tgtURL, err := url.Parse(tgtFile.Location().(*Location).ContainerURL())
+    if err != nil {
+        return err
+    }
+
+    containerURL := azblob.NewContainerURL(*tgtURL, a.pipeline)
+    blobURL := containerURL.NewBlockBlobURL(utils.RemoveLeadingSlash(tgtFile.Path()))
+    ctx := context.Background()
+    resp, err := blobURL.StartCopyFromURL(ctx, *srcURL, azblob.Metadata{}, azblob.ModifiedAccessConditions{},
+        azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil)
+    if err != nil {
+        return err
+    }
+
+    for resp.CopyStatus() == azblob.CopyStatusPending {
+        time.Sleep(2 * time.Second)
+    }
+
+    if resp.CopyStatus() == azblob.CopyStatusSuccess {
+        return nil
+    }
+
+    return fmt.Errorf("copy failed ERROR[%s]", resp.ErrorCode())
+}
+
+// List will return a listing of the contents of the given location. Each item in the list will contain the full key
+// as specified by the azure blob (including the virtual 'path').
+func (a *DefaultClient) List(l vfs.Location) ([]string, error) {
+    URL, err := url.Parse(l.(*Location).ContainerURL())
+    if err != nil {
+        return []string{}, err
+    }
+
+    containerURL := azblob.NewContainerURL(*URL, a.pipeline)
+    ctx := context.Background()
+    var list []string
+    for marker := (azblob.Marker{}); marker.NotDone(); {
+        listBlob, err := containerURL.ListBlobsHierarchySegment(ctx, marker, "/",
+            azblob.ListBlobsSegmentOptions{Prefix: utils.RemoveLeadingSlash(l.Path())})
+        if err != nil {
+            return []string{}, err
+        }
+
+        marker = listBlob.NextMarker
+
+        for i := range listBlob.Segment.BlobItems {
+            list = append(list, listBlob.Segment.BlobItems[i].Name)
+        }
+    }
+    return list, nil
+}
+
+// Delete deletes the given file from Azure Blob Storage.
+func (a *DefaultClient) Delete(file vfs.File) error {
+    URL, err := url.Parse(file.Location().(*Location).ContainerURL())
+    if err != nil {
+        return err
+    }
+
+    containerURL := azblob.NewContainerURL(*URL, a.pipeline)
+    blobURL := containerURL.NewBlockBlobURL(utils.RemoveLeadingSlash(file.Path()))
+    _, err = blobURL.Delete(context.Background(), azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
+    return err
+}
+
+// DeleteAllVersions deletes all the versions of the given file from Azure Blob Storage.
+// First the file blob is deleted, then each version of the blob is deleted.
+// If soft deletion is enabled for blobs in the storage account, each version will be marked for deletion and will be
+// permanently deleted by Azure as per the soft deletion policy.
+func (a *DefaultClient) DeleteAllVersions(file vfs.File) error {
+    URL, err := url.Parse(file.Location().(*Location).ContainerURL())
+    if err != nil {
+        return err
+    }
+
+    containerURL := azblob.NewContainerURL(*URL, a.pipeline)
+    blobURL := containerURL.NewBlockBlobURL(utils.RemoveLeadingSlash(file.Path()))
+
+    versions, err := a.getBlobVersions(containerURL, utils.RemoveLeadingSlash(file.Path()))
+    if err != nil {
+        return err
+    }
+
+    for _, version := range versions {
+        // Delete a specific version
+        _, err = blobURL.WithVersionID(*version).Delete(context.Background(), azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
+        if err != nil {
+            return err
+        }
+    }
+
+    return err
+}
+
+func (a *DefaultClient) getBlobVersions(containerURL azblob.ContainerURL, blobName string) ([]*string, error) {
+    ctx := context.Background()
+    var versions []*string
+    for marker := (azblob.Marker{}); marker.NotDone(); {
+        listBlob, err := containerURL.ListBlobsFlatSegment(ctx, marker,
+            azblob.ListBlobsSegmentOptions{Prefix: blobName, Details: azblob.BlobListingDetails{Versions: true}})
+        if err != nil {
+            return nil, err
+        }
+
+        marker = listBlob.NextMarker
+
+        for i := range listBlob.Segment.BlobItems {
+            blobItem := listBlob.Segment.BlobItems[i]
+            if blobItem.VersionID != nil {
+                versions = append(versions, blobItem.VersionID)
+            }
+        }
+    }
+    return versions, nil
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/azure/doc.go b/vendor/github.com/c2fo/vfs/v6/backend/azure/doc.go
new file mode 100644
index 000000000..8b40c6b3c
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/azure/doc.go
@@ -0,0 +1,63 @@
+/*
+Package azure Microsoft Azure Blob Storage VFS Implementation
+
+# Usage
+
+Rely on github.com/c2fo/vfs/backend
+
+    import(
+        "github.com/c2fo/vfs/v6/backend"
+        "github.com/c2fo/vfs/v6/backend/azure"
+    )
+
+    func UseFs() error {
+        fs := backend.Backend(azure.Scheme)
+        ...
+    }
+
+Or call directly:
+
+    import "github.com/c2fo/vfs/v6/backend/azure"
+
+    func DoSomething() {
+        fs := azure.NewFileSystem()
+        ...
+    }
+
+azure can be augmented with the following implementation-specific methods. Backend returns the vfs.FileSystem interface, so it
+would have to be cast as *azure.FileSystem to use the following:
+
+    func DoSomething() {
+
+        ...
+
+        // cast if fs was created using backend.Backend(). Not necessary if created directly from azure.NewFileSystem().
+        fs = fs.(*azure.FileSystem)
+
+        // to pass in client options
+        fs = fs.WithOptions(
+            azure.Options{
+                AccountName: "...",
+                AccountKey:  "...",
+            },
+        )
+
+        // to pass specific client, for instance a mock client
+        client, _ := azure.NewClient(MockAzureClient{...})
+        fs = fs.WithClient(client)
+    }
+
+# Authentication
+
+Authentication, by default, occurs automatically when Client() is called. It looks for credentials in the following places,
+preferring the first location found:
+
+ 1. When the ENV vars VFS_AZURE_ENV_NAME, VFS_AZURE_STORAGE_ACCOUNT, VFS_AZURE_TENANT_ID, VFS_AZURE_CLIENT_ID, and
+    VFS_AZURE_CLIENT_SECRET are set, authentication is performed using an OAuth Token Authenticator. This allows access
+    to containers from multiple storage accounts.
+ 2. When the ENV vars VFS_AZURE_STORAGE_ACCOUNT and VFS_AZURE_STORAGE_KEY are set, a shared key authenticator is used.
+    This allows access to any containers owned by the designated storage account.
+ 3. If none of the above are present, an anonymous authenticator is created, and only publicly accessible
+    blobs will be available.
+*/
+package azure
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/azure/file.go b/vendor/github.com/c2fo/vfs/v6/backend/azure/file.go
new file mode 100644
index 000000000..1c74a0665
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/azure/file.go
@@ -0,0 +1,384 @@
+package azure
+
+import (
+    "errors"
+    "fmt"
+    "io"
+    "os"
+    "path"
+    "strings"
+    "time"
+
+    "github.com/Azure/azure-storage-blob-go/azblob"
+
+    "github.com/c2fo/vfs/v6"
+    "github.com/c2fo/vfs/v6/backend"
+    "github.com/c2fo/vfs/v6/options"
+    "github.com/c2fo/vfs/v6/options/delete"
+    "github.com/c2fo/vfs/v6/utils"
+)
+
+// File implements the vfs.File interface for Azure Blob Storage
+type File struct {
+    fileSystem *FileSystem
+    container  string
+    name       string
+    tempFile   *os.File
+    isDirty    bool
+}
+
+// Close cleans up all of the backing data structures used for reading/writing files. This includes closing the
+// temp file, uploading the contents of the temp file to Azure Blob Storage (if necessary), and calling Seek(0, 0).
+func (f *File) Close() error {
+    if f.tempFile != nil {
+        defer func() {
+            _ = f.tempFile.Close()
+            f.tempFile = nil
+            f.isDirty = false
+        }()
+
+        client, err := f.fileSystem.Client()
+        if err != nil {
+            return utils.WrapCloseError(err)
+        }
+
+        if _, err := f.Seek(0, 0); err != nil {
+            return utils.WrapCloseError(err)
+        }
+
+        if f.isDirty {
+            if err := client.Upload(f, f.tempFile); err != nil {
+                return utils.WrapCloseError(err)
+            }
+        }
+    }
+    return nil
+}
+
+// Read implements the io.Reader interface. For this to work with Azure Blob Storage, a temporary local copy of
+// the file is created and read operations are performed against that. The temp file is closed and flushed to Azure
+// when f.Close() is called.
+func (f *File) Read(p []byte) (n int, err error) {
+    if err := f.checkTempFile(); err != nil {
+        return 0, utils.WrapReadError(err)
+    }
+    read, err := f.tempFile.Read(p)
+    if err != nil {
+        // if we got io.EOF, we'll return the read and the EOF error
+        // because io.Copy looks for EOF to determine if it's done
+        // and doesn't support error wrapping
+        if errors.Is(err, io.EOF) {
+            return read, io.EOF
+        }
+        return read, utils.WrapReadError(err)
+    }
+
+    return read, nil
+}
+
+// Seek implements the io.Seeker interface. For this to work with Azure Blob Storage, a temporary local copy of
+// the file is created and operations are performed against that. The temp file is closed and flushed to Azure
+// when f.Close() is called.
+func (f *File) Seek(offset int64, whence int) (int64, error) {
+    if err := f.checkTempFile(); err != nil {
+        return 0, utils.WrapSeekError(err)
+    }
+    pos, err := f.tempFile.Seek(offset, whence)
+    if err != nil {
+        return 0, utils.WrapSeekError(err)
+    }
+    return pos, nil
+}
+
+// Write implements the io.Writer interface. Writes are performed against a temporary local file. The temp file is
+// closed and flushed to Azure when f.Close() is called.
+func (f *File) Write(p []byte) (int, error) { + if err := f.checkTempFile(); err != nil { + return 0, utils.WrapWriteError(err) + } + + n, err := f.tempFile.Write(p) + if err != nil { + return 0, utils.WrapWriteError(err) + } + + f.isDirty = true + + return n, nil +} + +// String returns the file URI +func (f *File) String() string { + return f.URI() +} + +// Exists returns true/false if the file exists/does not exist on Azure +func (f *File) Exists() (bool, error) { + client, err := f.fileSystem.Client() + if err != nil { + return false, err + } + _, err = client.Properties(f.Location().(*Location).ContainerURL(), f.Path()) + if err != nil { + if err.(azblob.StorageError).ServiceCode() != "BlobNotFound" { + return false, err + } + return false, nil + } + return true, nil +} + +// Location returns a Location instance for the files current location +func (f *File) Location() vfs.Location { + return vfs.Location(&Location{ + fileSystem: f.fileSystem, + container: f.container, + path: path.Dir(f.name), + }) +} + +// CopyToLocation creates a copy of *File, using the file's current name as the new file's +// name at the given location. If the given location is also azure, the azure API for copying +// files will be utilized, otherwise, standard io.Copy will be done to the new file. +func (f *File) CopyToLocation(location vfs.Location) (vfs.File, error) { + newFile, err := location.NewFile(utils.RemoveLeadingSlash(f.Name())) + if err != nil { + return nil, err + } + + if err := f.CopyToFile(newFile); err != nil { + return nil, err + } + + return newFile, nil +} + +// CopyToFile puts the contents of the receiver (f *File) into the passed vfs.File parameter. +func (f *File) CopyToFile(file vfs.File) (err error) { + // Close file (f) reader regardless of an error + defer func() { + // close writer + wErr := file.Close() + // close reader + rErr := f.Close() + // + if err == nil { + if wErr != nil { + err = wErr + } else if rErr != nil { + err = rErr + } + } + }() + + // validate seek is at 0,0 before doing copy + if verr := backend.ValidateCopySeekPosition(f); verr != nil { + return verr + } + + azFile, ok := file.(*File) + if ok { + if f.isSameAuth(azFile) { + client, err := f.fileSystem.Client() + if err != nil { + return err + } + return client.Copy(f, file) + } + } + + // Otherwise, use TouchCopyBuffered using io.CopyBuffer + fileBufferSize := 0 + + if fs, ok := f.Location().FileSystem().(*FileSystem); ok { + fileBufferSize = fs.options.FileBufferSize + } + + if terr := utils.TouchCopyBuffered(file, f, fileBufferSize); terr != nil { + return terr + } + + if cerr := file.Close(); cerr != nil { + return cerr + } + + return err +} + +// MoveToLocation copies the receiver to the passed location. After the copy succeeds, the original is deleted. +func (f *File) MoveToLocation(location vfs.Location) (vfs.File, error) { + newFile, err := f.CopyToLocation(location) + if err != nil { + return nil, err + } + + return newFile, f.Delete() +} + +// MoveToFile copies the receiver to the specified file and deletes the original file. +func (f *File) MoveToFile(file vfs.File) error { + if err := f.CopyToFile(file); err != nil { + return err + } + + return f.Delete() +} + +// Delete deletes the file. +// If DeleteAllVersions option is provided, each version of the file is deleted. NOTE: if soft deletion is enabled, +// it will mark all versions as soft deleted, and they will be removed by Azure as per soft deletion policy. +// Returns any error returned by the API. 
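+//
+// Illustrative sketch (assumes the delete.DeleteAllVersions option type is
+// zero-value constructible, per the type switch below):
+//
+//	err := f.Delete(delete.DeleteAllVersions{})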
+func (f *File) Delete(opts ...options.DeleteOption) error { + if err := f.Close(); err != nil { + return err + } + + client, err := f.fileSystem.Client() + if err != nil { + return err + } + + var deleteAllVersions bool + for _, o := range opts { + switch o.(type) { + case delete.DeleteAllVersions: + deleteAllVersions = true + default: + } + } + + if err := client.Delete(f); err != nil { + return err + } + + if deleteAllVersions { + return client.DeleteAllVersions(f) + } + + return err +} + +// LastModified returns the last modified time as a time.Time +func (f *File) LastModified() (*time.Time, error) { + client, err := f.fileSystem.Client() + if err != nil { + return nil, err + } + props, err := client.Properties(f.Location().(*Location).ContainerURL(), f.Path()) + if err != nil { + return nil, err + } + return props.LastModified, nil +} + +// Size returns the size of the blob +func (f *File) Size() (uint64, error) { + client, err := f.fileSystem.Client() + if err != nil { + return 0, err + } + props, err := client.Properties(f.Location().(*Location).ContainerURL(), f.Path()) + if err != nil { + return 0, err + } + return props.Size, nil +} + +// Path returns full path with leading slash. +func (f *File) Path() string { + return f.name +} + +// Name returns the name of the file +func (f *File) Name() string { + return path.Base(f.name) +} + +// Touch creates a zero-length file on the vfs.File if no File exists. If the file exists, Touch updates the file's +// last modified parameter. +func (f *File) Touch() error { + exists, err := f.Exists() + if err != nil { + return err + } + + client, err := f.fileSystem.Client() + if err != nil { + return err + } + + if !exists { + return client.Upload(f, strings.NewReader("")) + } + + props, err := client.Properties(f.Location().(*Location).ContainerURL(), f.Path()) + if err != nil { + return err + } + + newMetadata := make(map[string]string) + newMetadata["updated"] = "true" + if err := client.SetMetadata(f, newMetadata); err != nil { + return err + } + + if err := client.SetMetadata(f, props.Metadata); err != nil { + return err + } + + return nil +} + +// URI returns a full Azure URI for the file +func (f *File) URI() string { + return fmt.Sprintf("%s://%s%s", f.fileSystem.Scheme(), utils.EnsureTrailingSlash(f.fileSystem.Host()), path.Join(f.container, f.name)) +} + +func (f *File) checkTempFile() error { + if f.tempFile == nil { + client, err := f.fileSystem.Client() + if err != nil { + return err + } + + exists, err := f.Exists() + if err != nil { + return err + } + if !exists { + tf, tfErr := os.CreateTemp("", fmt.Sprintf("%s.%d", path.Base(f.Name()), time.Now().UnixNano())) + if tfErr != nil { + return tfErr + } + f.tempFile = tf + } else { + reader, dlErr := client.Download(f) + if dlErr != nil { + return dlErr + } + + tf, tfErr := os.CreateTemp("", fmt.Sprintf("%s.%d", path.Base(f.Name()), time.Now().UnixNano())) + if tfErr != nil { + return tfErr + } + + buffer := make([]byte, utils.TouchCopyMinBufferSize) + if _, err := io.CopyBuffer(tf, reader, buffer); err != nil { + return err + } + + if _, err := tf.Seek(0, 0); err != nil { + return err + } + + f.tempFile = tf + } + } + return nil +} + +func (f *File) isSameAuth(target *File) bool { + sourceOptions := f.fileSystem.options + targetOptions := target.fileSystem.options + return sourceOptions.AccountKey == targetOptions.AccountKey +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/azure/fileSystem.go b/vendor/github.com/c2fo/vfs/v6/backend/azure/fileSystem.go new file mode 100644 
index 000000000..23280208e
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/azure/fileSystem.go
@@ -0,0 +1,156 @@
+package azure
+
+import (
+    "errors"
+    "fmt"
+    "net/url"
+    "os"
+    "path"
+    "regexp"
+    "strings"
+
+    "github.com/c2fo/vfs/v6"
+    "github.com/c2fo/vfs/v6/backend"
+    "github.com/c2fo/vfs/v6/utils"
+)
+
+// Scheme defines the scheme for the azure implementation
+const Scheme = "https"
+
+// Name defines the name for the azure implementation
+const Name = "azure"
+
+const errNilFileSystemReceiver = "azure.FileSystem receiver pointer must be non-nil"
+
+// FileSystem implements the vfs.FileSystem interface for Azure Blob Storage
+type FileSystem struct {
+    options *Options
+    client  Client
+}
+
+// NewFileSystem creates a new default FileSystem. This will set options.AccountName and
+// options.AccountKey from the env variables VFS_AZURE_STORAGE_ACCOUNT and VFS_AZURE_STORAGE_ACCESS_KEY, respectively.
+func NewFileSystem() *FileSystem {
+    return &FileSystem{options: NewOptions()}
+}
+
+// WithOptions allows the caller to override the default options
+func (fs *FileSystem) WithOptions(opts vfs.Options) *FileSystem {
+    azureOpts, _ := opts.(Options)
+    fs.options = &azureOpts
+    return fs
+}
+
+// WithClient allows the caller to specify a specific client to be used
+func (fs *FileSystem) WithClient(client Client) *FileSystem {
+    fs.client = client
+    return fs
+}
+
+// Client returns a Client to perform operations against Azure Blob Storage
+func (fs *FileSystem) Client() (Client, error) {
+    if fs.client == nil {
+        client, err := NewClient(fs.options)
+        fs.client = client
+        return fs.client, err
+    }
+    return fs.client, nil
+}
+
+// NewFile returns the azure implementation of vfs.File
+func (fs *FileSystem) NewFile(volume, absFilePath string) (vfs.File, error) {
+    if fs == nil {
+        return nil, errors.New(errNilFileSystemReceiver)
+    }
+
+    if volume == "" || absFilePath == "" {
+        return nil, errors.New("non-empty strings for container and path are required")
+    }
+
+    if err := utils.ValidateAbsoluteFilePath(absFilePath); err != nil {
+        return nil, err
+    }
+
+    return &File{
+        fileSystem: fs,
+        container:  volume,
+        name:       path.Clean(absFilePath),
+    }, nil
+}
+
+// NewLocation returns the azure implementation of vfs.Location
+func (fs *FileSystem) NewLocation(volume, absLocPath string) (vfs.Location, error) {
+    if fs == nil {
+        return nil, errors.New(errNilFileSystemReceiver)
+    }
+
+    if volume == "" || absLocPath == "" {
+        return nil, errors.New("non-empty strings for container and path are required")
+    }
+
+    if err := utils.ValidateAbsoluteLocationPath(absLocPath); err != nil {
+        return nil, err
+    }
+
+    return &Location{
+        fileSystem: fs,
+        container:  volume,
+        path:       path.Clean(absLocPath),
+    }, nil
+}
+
+// Name returns "azure"
+func (fs *FileSystem) Name() string {
+    return Name
+}
+
+// Scheme returns "https" as the initial part of the URI i.e. https://..
+func (fs *FileSystem) Scheme() string {
+    return Scheme
+}
+
+// Host returns the host portion of the URI. For azure this consists of <account_name>.blob.core.windows.net.
+func (fs *FileSystem) Host() string {
+    return fmt.Sprintf("%s.blob.core.windows.net", fs.options.AccountName)
+}
+
+// Retry returns the default retry function. This is overridable via the WithOptions function.
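+//
+// For example (a sketch; assumes vfs.Retry has the func(wrapped func() error) error shape):
+//
+//	fs = fs.WithOptions(Options{
+//		RetryFunc: func(wrapped func() error) error { return wrapped() }, // no-op retry
+//	})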
+func (fs *FileSystem) Retry() vfs.Retry {
+    if fs.options.RetryFunc != nil {
+        return fs.options.RetryFunc
+    }
+    return vfs.DefaultRetryer()
+}
+
+func init() {
+    // registers a default FileSystem
+    backend.Register(Scheme, NewFileSystem())
+}
+
+// ParsePath is a utility function used by vfssimple to separate the host from the path. The first parameter returned
+// is the host and the second parameter is the path.
+func ParsePath(p string) (host, pth string, err error) {
+    if p == "/" {
+        return "", "", errors.New("no container specified for Azure path")
+    }
+    var isLocation bool
+    if p[len(p)-1:] == string(os.PathSeparator) {
+        isLocation = true
+    }
+    l := strings.Split(p, string(os.PathSeparator))
+    p = utils.EnsureLeadingSlash(path.Join(l[2:]...))
+    if isLocation {
+        p = utils.EnsureTrailingSlash(p)
+    }
+    return l[1], p, nil
+}
+
+// IsValidURI is a utility function used by vfssimple to determine if the given URI is a valid Azure URI
+func IsValidURI(u *url.URL) bool {
+    r := regexp.MustCompile(`.*\.blob\.core\.windows\.net`)
+
+    if u.Scheme == Scheme && r.MatchString(u.Host) {
+        return true
+    }
+    return false
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/azure/location.go b/vendor/github.com/c2fo/vfs/v6/backend/azure/location.go
new file mode 100644
index 000000000..967a64d29
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/azure/location.go
@@ -0,0 +1,206 @@
+package azure
+
+import (
+    "errors"
+    "fmt"
+    "path"
+    "regexp"
+    "strings"
+
+    "github.com/c2fo/vfs/v6"
+    "github.com/c2fo/vfs/v6/options"
+    "github.com/c2fo/vfs/v6/utils"
+)
+
+const errNilLocationReceiver = "azure.Location receiver pointer must be non-nil"
+
+// Location is the azure implementation of vfs.Location
+type Location struct {
+    container  string
+    path       string
+    fileSystem *FileSystem
+}
+
+// String returns the URI
+func (l *Location) String() string {
+    return l.URI()
+}
+
+// List returns a list of base names for the given location.
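+//
+// For example (container and path hypothetical):
+//
+//	loc, _ := fs.NewLocation("mycontainer", "/some/dir/")
+//	names, err := loc.List() // e.g. []string{"a.txt", "b.txt"}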
+func (l *Location) List() ([]string, error) {
+    client, err := l.fileSystem.Client()
+    if err != nil {
+        return nil, err
+    }
+    list, err := client.List(l)
+    if err != nil {
+        return nil, err
+    }
+
+    var ret []string
+    for _, item := range list {
+        ret = append(ret, path.Base(item))
+    }
+
+    if len(ret) == 0 {
+        return []string{}, nil
+    }
+
+    return ret, nil
+}
+
+// ListByPrefix returns a list of base names that contain the given prefix
+func (l *Location) ListByPrefix(prefix string) ([]string, error) {
+    if strings.Contains(prefix, "/") {
+        listLoc, err := l.NewLocation(utils.EnsureTrailingSlash(path.Dir(prefix)))
+        if err != nil {
+            return nil, err
+        }
+
+        return listLocationByPrefix(listLoc.(*Location), path.Base(prefix))
+    }
+
+    return listLocationByPrefix(l, prefix)
+}
+
+func listLocationByPrefix(location *Location, prefix string) ([]string, error) {
+    listing, err := location.List()
+    if err != nil {
+        return nil, err
+    }
+
+    var filtered []string
+    for _, item := range listing {
+        if strings.HasPrefix(item, prefix) {
+            filtered = append(filtered, path.Base(item))
+        }
+    }
+
+    if len(filtered) == 0 {
+        return []string{}, nil
+    }
+
+    return filtered, nil
+}
+
+// ListByRegex returns a list of base names that match the given regular expression
+func (l *Location) ListByRegex(regex *regexp.Regexp) ([]string, error) {
+    listing, err := l.List()
+    if err != nil {
+        return nil, err
+    }
+
+    var filtered []string
+    for _, item := range listing {
+        if regex.MatchString(item) {
+            filtered = append(filtered, path.Base(item))
+        }
+    }
+
+    if len(filtered) == 0 {
+        return []string{}, nil
+    }
+
+    return filtered, nil
+}
+
+// Volume returns the azure container. Azure containers are equivalent to AWS Buckets
+func (l *Location) Volume() string {
+    return l.container
+}
+
+// Path returns the absolute path for the Location
+func (l *Location) Path() string {
+    return utils.EnsureTrailingSlash(utils.EnsureLeadingSlash(l.path))
+}
+
+// Exists returns true if the location exists and false otherwise. If the check against Azure
+// fails, false is returned and the underlying error is not propagated.
+func (l *Location) Exists() (bool, error) {
+    client, err := l.fileSystem.Client()
+    if err != nil {
+        return false, err
+    }
+    _, err = client.Properties(l.ContainerURL(), "")
+    if err != nil {
+        return false, nil
+    }
+    return true, nil
+}
+
+// NewLocation creates a new location instance relative to the current location's path.
+func (l *Location) NewLocation(relLocPath string) (vfs.Location, error) {
+    if l == nil {
+        return nil, errors.New(errNilLocationReceiver)
+    }
+
+    if err := utils.ValidateRelativeLocationPath(relLocPath); err != nil {
+        return nil, err
+    }
+
+    return &Location{
+        fileSystem: l.fileSystem,
+        container:  l.container,
+        path:       path.Join(l.path, relLocPath),
+    }, nil
+}
+
+// ChangeDir changes the current location's path to the new, relative path.
+func (l *Location) ChangeDir(relLocPath string) error {
+    if l == nil {
+        return errors.New(errNilLocationReceiver)
+    }
+
+    err := utils.ValidateRelativeLocationPath(relLocPath)
+    if err != nil {
+        return err
+    }
+
+    l.path = path.Join(l.path, relLocPath)
+
+    return nil
+}
+
+// FileSystem returns the azure FileSystem instance
+func (l *Location) FileSystem() vfs.FileSystem {
+    return l.fileSystem
+}
+
+// NewFile returns a new file instance at the given path, relative to the current location.
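+//
+// Sketch (path illustrative):
+//
+//	f, err := loc.NewFile("subdir/file.txt") // resolved against loc.Path()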
+func (l *Location) NewFile(relFilePath string) (vfs.File, error) { + if l == nil { + return nil, errors.New(errNilLocationReceiver) + } + + if err := utils.ValidateRelativeFilePath(relFilePath); err != nil { + return nil, err + } + + return &File{ + name: utils.EnsureLeadingSlash(path.Join(l.path, relFilePath)), + container: l.container, + fileSystem: l.fileSystem, + }, nil +} + +// DeleteFile deletes the file at the given path, relative to the current location. +func (l *Location) DeleteFile(relFilePath string, opts ...options.DeleteOption) error { + file, err := l.NewFile(utils.RemoveLeadingSlash(relFilePath)) + if err != nil { + return err + } + + return file.Delete(opts...) +} + +// URI returns a URI string for the azure location. +func (l *Location) URI() string { + return fmt.Sprintf("%s://%s%s", l.fileSystem.Scheme(), utils.EnsureTrailingSlash(l.fileSystem.Host()), + utils.EnsureTrailingSlash(path.Join(l.container, l.path))) +} + +// ContainerURL returns the URL for the Azure Blob Storage container. +func (l *Location) ContainerURL() string { + return fmt.Sprintf("%s://%s%s", l.fileSystem.Scheme(), utils.EnsureTrailingSlash(l.fileSystem.Host()), + utils.EnsureTrailingSlash(l.container)) +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/azure/mock_client.go b/vendor/github.com/c2fo/vfs/v6/backend/azure/mock_client.go new file mode 100644 index 000000000..55e1be82c --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/azure/mock_client.go @@ -0,0 +1,97 @@ +package azure + +import ( + "io" + "net/http" + + "github.com/Azure/azure-storage-blob-go/azblob" + + "github.com/c2fo/vfs/v6" +) + +// MockAzureClient is a mock implementation of azure.Client. +type MockAzureClient struct { + PropertiesError error + PropertiesResult *BlobProperties + ExpectedError error + ExpectedResult interface{} +} + +// Properties returns a PropertiesResult if it exists, otherwise it will return the value of PropertiesError +func (a *MockAzureClient) Properties(locationURI, filePath string) (*BlobProperties, error) { + if a.PropertiesResult == nil { + return nil, a.PropertiesError + } + return a.PropertiesResult, a.PropertiesError +} + +// SetMetadata returns the value of ExpectedError +func (a *MockAzureClient) SetMetadata(file vfs.File, metadata map[string]string) error { + return a.ExpectedError +} + +// Upload returns the value of ExpectedError +func (a *MockAzureClient) Upload(file vfs.File, content io.ReadSeeker) error { + return a.ExpectedError +} + +// Download returns ExpectedResult if it exists, otherwise it returns ExpectedError +func (a *MockAzureClient) Download(file vfs.File) (io.ReadCloser, error) { + if a.ExpectedResult != nil { + return a.ExpectedResult.(io.ReadCloser), nil + } + return nil, a.ExpectedError +} + +// Copy returns the value of ExpectedError +func (a *MockAzureClient) Copy(srcFile, tgtFile vfs.File) error { + return a.ExpectedError +} + +// List returns the value of ExpectedResult if it exists, otherwise it returns ExpectedError. 
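+//
+// Typical test wiring for the mock as a whole (a sketch):
+//
+//	mock := &MockAzureClient{ExpectedResult: []string{"file.txt"}}
+//	fs := NewFileSystem().WithClient(mock)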
+func (a *MockAzureClient) List(l vfs.Location) ([]string, error) {
+    if a.ExpectedResult != nil {
+        return a.ExpectedResult.([]string), nil
+    }
+    return nil, a.ExpectedError
+}
+
+// Delete returns the value of ExpectedError
+func (a *MockAzureClient) Delete(file vfs.File) error {
+    return a.ExpectedError
+}
+
+// DeleteAllVersions returns the value of ExpectedError
+func (a *MockAzureClient) DeleteAllVersions(file vfs.File) error {
+    return a.ExpectedError
+}
+
+// MockStorageError is a mock for the azblob.StorageError interface
+type MockStorageError struct {
+    azblob.ResponseError
+}
+
+// ServiceCode always returns "BlobNotFound" to simulate the not found condition
+func (mse MockStorageError) ServiceCode() azblob.ServiceCodeType {
+    return "BlobNotFound"
+}
+
+// Response returns nil
+func (mse MockStorageError) Response() *http.Response {
+    return nil
+}
+
+// Timeout returns false
+func (mse MockStorageError) Timeout() bool {
+    return false
+}
+
+// Temporary returns false
+func (mse MockStorageError) Temporary() bool {
+    return false
+}
+
+// Error returns an empty string
+func (mse MockStorageError) Error() string {
+    return ""
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/azure/mock_token.go b/vendor/github.com/c2fo/vfs/v6/backend/azure/mock_token.go
new file mode 100644
index 000000000..27af9755f
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/azure/mock_token.go
@@ -0,0 +1,15 @@
+package azure
+
+import (
+    "time"
+
+    "github.com/Azure/azure-storage-blob-go/azblob"
+)
+
+// MockTokenCredentialFactory knows how to create a "do-nothing" credential used for unit testing
+type MockTokenCredentialFactory struct{}
+
+// New creates a new azblob.TokenCredential struct
+func (f *MockTokenCredentialFactory) New(tenantID, clientID, clientSecret, azureEnvName string) (azblob.TokenCredential, error) {
+    return azblob.NewTokenCredential("aaa", func(credential azblob.TokenCredential) time.Duration { return time.Second * 1 }), nil
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/azure/options.go b/vendor/github.com/c2fo/vfs/v6/backend/azure/options.go
new file mode 100644
index 000000000..b88445385
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/azure/options.go
@@ -0,0 +1,91 @@
+package azure
+
+import (
+    "os"
+
+    "github.com/c2fo/vfs/v6"
+
+    "github.com/Azure/azure-storage-blob-go/azblob"
+)
+
+// Options contains options necessary for the azure vfs implementation
+type Options struct {
+    // AccountName holds the Azure Blob Storage account name for authentication. This field is required for all
+    // authentication types.
+    AccountName string
+
+    // AccountKey holds the Azure Blob Storage account key for authentication. This field is used for shared key
+    // authentication.
+    AccountKey string
+
+    // TenantID holds the Azure Service Account tenant id for authentication. This field is used for OAuth token
+    // based authentication.
+    TenantID string
+
+    // ClientID holds the Azure Service Account client id for authentication. This field is used for OAuth token
+    // based authentication.
+    ClientID string
+
+    // ClientSecret holds the Azure Service Account client secret for authentication. This field is used for OAuth token
+    // based authentication.
+    ClientSecret string
+
+    // AzureEnvName holds the name for the Azure environment. This field is used for OAuth token
+    // based authentication.
+ AzureEnvName string + + // RetryFunc holds the retry function + RetryFunc vfs.Retry + + // Buffer Size In Bytes Used with utils.TouchCopyBuffered + FileBufferSize int + + tokenCredentialFactory TokenCredentialFactory +} + +// NewOptions creates a new Options struct by populating values from environment variables. +// +// Env Vars: +// *VFS_AZURE_STORAGE_ACCOUNT +// *VFS_AZURE_STORAGE_ACCESS_KEY +// *VFS_AZURE_TENANT_ID +// *VFS_AZURE_CLIENT_ID +// *VFS_AZURE_CLIENT_SECRET +// *VFS_AZURE_ENV_NAME +func NewOptions() *Options { + return &Options{ + AccountName: os.Getenv("VFS_AZURE_STORAGE_ACCOUNT"), + AccountKey: os.Getenv("VFS_AZURE_STORAGE_ACCESS_KEY"), + TenantID: os.Getenv("VFS_AZURE_TENANT_ID"), + ClientID: os.Getenv("VFS_AZURE_CLIENT_ID"), + ClientSecret: os.Getenv("VFS_AZURE_CLIENT_SECRET"), + AzureEnvName: os.Getenv("VFS_AZURE_ENV_NAME"), + tokenCredentialFactory: &DefaultTokenCredentialFactory{}, + } +} + +// Credential returns an azblob.Credential struct based on how options are configured. Options are checked +// and evaluated in the following order: +// 1. If TenantID, ClientID, and ClientSecret are non-empty, return azblob.TokenCredential. This form of authentication +// is used with service accounts and can be used to access containers across multiple storage accounts. +// 2. If AccountName, and AccountKey are non-empty, return azblob.SharedKeyCredential. This form or authentication +// is used with storage accounts and only provides access to a single storage account. +// 3. Returns an anonymous credential. This allows access only to public blobs. +func (o *Options) Credential() (azblob.Credential, error) { + if o.tokenCredentialFactory == nil { + o.tokenCredentialFactory = &DefaultTokenCredentialFactory{} + } + + // Check to see if we have service account credentials + if o.TenantID != "" && o.ClientID != "" && o.ClientSecret != "" { + return o.tokenCredentialFactory.New(o.TenantID, o.ClientID, o.ClientSecret, o.AzureEnvName) + } + + // Check to see if we have storage account credentials + if o.AccountName != "" && o.AccountKey != "" { + return azblob.NewSharedKeyCredential(o.AccountName, o.AccountKey) + } + + // 3. Return an anonymous credential + return azblob.NewAnonymousCredential(), nil +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/azure/properties.go b/vendor/github.com/c2fo/vfs/v6/backend/azure/properties.go new file mode 100644 index 000000000..d2fe9c092 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/azure/properties.go @@ -0,0 +1,29 @@ +package azure + +import ( + "time" + + "github.com/Azure/azure-storage-blob-go/azblob" +) + +// BlobProperties holds a subset of information returned by Blob.GetProperties(..) +type BlobProperties struct { + // Size holds the size of the blob. 
+    Size uint64
+
+    // LastModified holds the last modified time.Time
+    LastModified *time.Time
+
+    // Metadata holds the Azure metadata
+    Metadata map[string]string
+}
+
+// NewBlobProperties creates a new BlobProperties from an azblob.BlobGetPropertiesResponse
+func NewBlobProperties(azureProps *azblob.BlobGetPropertiesResponse) *BlobProperties {
+    lastModified := azureProps.LastModified()
+    return &BlobProperties{
+        LastModified: &lastModified,
+        Metadata:     azureProps.NewMetadata(),
+        Size:         uint64(azureProps.ContentLength()),
+    }
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/azure/token.go b/vendor/github.com/c2fo/vfs/v6/backend/azure/token.go
new file mode 100644
index 000000000..ab2b01f0c
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/azure/token.go
@@ -0,0 +1,55 @@
+package azure
+
+import (
+    "time"
+
+    "github.com/Azure/azure-storage-blob-go/azblob"
+    "github.com/Azure/go-autorest/autorest/adal"
+    "github.com/Azure/go-autorest/autorest/azure"
+)
+
+// TokenCredentialFactory is an interface that provides a single factory method to create azure.TokenCredentials. This
+// interface is provided to allow for mocking in unit tests.
+type TokenCredentialFactory interface {
+    // New creates a new azblob.TokenCredential struct
+    New(tenantID, clientID, clientSecret, azureEnvName string) (azblob.TokenCredential, error)
+}
+
+// DefaultTokenCredentialFactory knows how to make azblob.TokenCredential structs for OAuth authentication
+type DefaultTokenCredentialFactory struct{}
+
+// New creates a new azblob.TokenCredential struct
+func (f *DefaultTokenCredentialFactory) New(tenantID, clientID, clientSecret, azureEnvName string) (azblob.TokenCredential, error) {
+    env, err := azure.EnvironmentFromName(azureEnvName)
+    if err != nil {
+        return nil, err
+    }
+
+    oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, tenantID)
+    if err != nil {
+        return nil, err
+    }
+
+    spt, err := adal.NewServicePrincipalToken(*oauthConfig, clientID, clientSecret, env.ResourceIdentifiers.Storage)
+    if err != nil {
+        return nil, err
+    }
+
+    initialToken := spt.Token()
+
+    var tokenRefresher = func(credential azblob.TokenCredential) time.Duration {
+        err := spt.Refresh()
+        if err != nil {
+            // Not sure what else to do here except panic...
+ // The documentation suggests returning 0 so the token will never attempt to refresh again + // and then call cancel() on any context in the pipeline that have this credential object + panic(err) + } + + token := spt.Token() + credential.SetToken(token.AccessToken) + return token.Expires().Sub(time.Now().Add(2 * time.Minute)) + } + + return azblob.NewTokenCredential(initialToken.AccessToken, tokenRefresher), nil +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/backend.go b/vendor/github.com/c2fo/vfs/v6/backend/backend.go new file mode 100644 index 000000000..2099671a9 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/backend.go @@ -0,0 +1,56 @@ +package backend + +import ( + "sort" + "sync" + + "github.com/c2fo/vfs/v6" +) + +var mmu sync.RWMutex +var m map[string]vfs.FileSystem + +// Register a new file system in backend map +func Register(name string, v vfs.FileSystem) { + mmu.Lock() + m[name] = v + mmu.Unlock() +} + +// Unregister unregisters a file system from backend map +func Unregister(name string) { + mmu.Lock() + delete(m, name) + mmu.Unlock() +} + +// UnregisterAll unregisters all file systems from backend map +func UnregisterAll() { + // mainly for tests + mmu.Lock() + m = make(map[string]vfs.FileSystem) + mmu.Unlock() +} + +// Backend returns the backend file system by name +func Backend(name string) vfs.FileSystem { + mmu.RLock() + defer mmu.RUnlock() + return m[name] +} + +// RegisteredBackends returns an array of backend names +func RegisteredBackends() []string { + var f []string + mmu.RLock() + for k := range m { + f = append(f, k) + } + mmu.RUnlock() + sort.Strings(f) + return f +} + +func init() { + m = make(map[string]vfs.FileSystem) +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/doc.go b/vendor/github.com/c2fo/vfs/v6/backend/doc.go new file mode 100644 index 000000000..53cdda09e --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/doc.go @@ -0,0 +1,76 @@ +/* +Package backend provides a means of allowing backend file systems to self-register on load via an init() call to +backend.Register("some name", vfs.FileSystem) + +In this way, a caller of vfs backends can simply load the backend file system (and ONLY those needed) and begin using it: + + package main + + // import backend and each backend you intend to use + import( + "github.com/c2fo/vfs/v6/backend" + "github.com/c2fo/vfs/v6/backend/os" + "github.com/c2fo/vfs/v6/backend/s3" + ) + + func main() { + var err error + var osfile, s3file vfs.File + + // THEN begin using the file systems + osfile, err = backend.Backend(os.Scheme).NewFile("", "/path/to/file.txt") + if err != nil { + panic(err) + } + + s3file, err = backend.Backend(s3.Scheme).NewFile("mybucket", "/some/file.txt") + if err != nil { + panic(err) + } + + err = osfile.CopyTo(s3file) + if err != nil { + panic(err) + } + } + +# Development + +To create your own backend, you must create a package that implements the interfaces: vfs.FileSystem, vfs.Location, and vfs.File. +Then ensure it registers itself on load: + + package myexoticfilesystem + + import( + ... + "github.com/c2fo/vfs/v6" + "github.com/c2fo/vfs/v6/backend" + ) + + // IMPLEMENT vfs interfaces + ... + + // register backend + func init() { + backend.Register("exfs", &MyExoticFilesystem{}) + } + +Then do use it in some other package do + + package MyExoticFileSystem + + import( + "github.com/c2fo/vfs/v6/backend" + "github.com/acme/myexoticfilesystem" + ) + + ... + + func useNewBackend() error { + myExoticFs, err = backend.Backend(myexoticfilesystem.Scheme) + ... 
+ } + +That's it. Simple. +*/ +package backend diff --git a/vendor/github.com/c2fo/vfs/v6/backend/ftp/dataconn.go b/vendor/github.com/c2fo/vfs/v6/backend/ftp/dataconn.go new file mode 100644 index 000000000..81ec75799 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/ftp/dataconn.go @@ -0,0 +1,220 @@ +package ftp + +import ( + "context" + "errors" + "io" + "net/textproto" + "time" + + _ftp "github.com/jlaffaye/ftp" + + "github.com/c2fo/vfs/v6/backend/ftp/types" + "github.com/c2fo/vfs/v6/utils" +) + +type dataConn struct { + R io.ReadCloser + W io.WriteCloser + mode types.OpenType + c types.Client + errChan chan error +} + +// Delete deletes the file at the given path. Only allowed +// in a single op connection. +func (dc *dataConn) Delete(path string) error { + if dc.mode != types.SingleOp { + return singleOpInvalidDataconnType + } + return dc.c.Delete(path) + +} + +// GetEntry attempts to retrieve the ftp entry for +// a file at the given path. Only allowed +// in a single op connection. +func (dc *dataConn) GetEntry(p string) (*_ftp.Entry, error) { + if dc.mode != types.SingleOp { + return nil, singleOpInvalidDataconnType + } + return dc.c.GetEntry(p) +} + +// List conducts an FTP list for the given path. Only allowed in a single op connection. +func (dc *dataConn) List(p string) ([]*_ftp.Entry, error) { + if dc.mode != types.SingleOp { + return nil, singleOpInvalidDataconnType + } + return dc.c.List(p) +} + +// MakeDir creates a directory for the given path. +// Only allowed in a single op connection. +func (dc *dataConn) MakeDir(path string) error { + if dc.mode != types.SingleOp { + return singleOpInvalidDataconnType + } + return dc.c.MakeDir(path) +} + +// Rename attempts to change the name of the file at from +// to the name specified at to. Only allowed in a single op connection. +func (dc *dataConn) Rename(from, to string) error { + if dc.mode != types.SingleOp { + return singleOpInvalidDataconnType + } + return dc.c.Rename(from, to) +} + +// IsSetTimeSupported will specify whether the ftp +// set time function is availalbe for the connection. +func (dc *dataConn) IsSetTimeSupported() bool { + return dc.c.IsSetTimeSupported() +} + +// SetTime will attempt to set the last modified time of +// a file at the given path to the given time. Only allowed +// in single op mode. +func (dc *dataConn) SetTime(path string, t time.Time) error { + if dc.mode != types.SingleOp { + return singleOpInvalidDataconnType + } + return dc.c.SetTime(path, t) +} + +// IsTimePreciseInList will return true if MLST is an available FTP action in the given dataconn. +func (dc *dataConn) IsTimePreciseInList() bool { + return dc.c.IsTimePreciseInList() +} + +// Mode will return the mode of the DataConn. +func (dc *dataConn) Mode() types.OpenType { + return dc.mode +} + +// Read will read bytes from the DataConn open file to the given buffer. Only allowed in an open read DataConn +func (dc *dataConn) Read(buf []byte) (int, error) { + if dc.mode != types.OpenRead { + return 0, readInvalidDataconnType + } + return dc.R.Read(buf) +} + +// Write will write bytes to the DataConn open file. +func (dc *dataConn) Write(data []byte) (int, error) { + if dc.mode != types.OpenWrite { + return 0, writeInvalidDataconnType + } + return dc.W.Write(data) +} + +// Close will close the DataConnection for reading +// and writing. 
+func (dc *dataConn) Close() error { + switch dc.Mode() { + case types.OpenRead: + if dc.R != nil { + err := dc.R.Close() + dc.W = nil + dc.R = nil + return err + } + case types.OpenWrite: + if dc.W != nil { + if err := dc.W.Close(); err != nil { + return err + } + // after writer is closed STOR shoud commit - check for error + err := <-dc.errChan + dc.W = nil + dc.R = nil + return err + } + } + + return nil +} + +func getDataConn(ctx context.Context, authority utils.Authority, fs *FileSystem, f *File, t types.OpenType) (types.DataConn, error) { + if fs == nil { + return nil, errors.New("can not get a dataconn for a nil fileset") + } + if fs.dataconn != nil && fs.dataconn.Mode() != t { + // wrong session type ... close current session and unset it (ps so we can set a new one after) + if err := fs.dataconn.Close(); err != nil { + return nil, err + } + fs.dataconn = nil + } + + if fs.dataconn == nil || fs.resetConn { + client, err := fs.Client(ctx, authority) + if err != nil { + return nil, err + } + + switch t { + case types.OpenRead: + resp, err := client.RetrFrom(f.Path(), uint64(f.offset)) + // check errors + if err != nil { + return nil, err + } + fs.dataconn = &dataConn{ + R: resp, + mode: t, + } + case types.OpenWrite: + dc, err := openWriteConnection(client, f) + if err != nil { + return nil, err + } + fs.dataconn = dc + case types.SingleOp: + fs.dataconn = &dataConn{ + mode: t, + c: client, + } + } + // ensure resetConn is false since we've opened/reopened the file + if f != nil { + fs.resetConn = false + } + } + + return fs.dataconn, nil +} + +func openWriteConnection(client types.Client, f *File) (types.DataConn, error) { + found, err := f.Location().Exists() + if err != nil { + return nil, err + } + if !found { + err := client.MakeDir(f.Location().Path()) + if err != nil { + var e *textproto.Error + if !(errors.As(err, &e) && e.Code == _ftp.StatusFileUnavailable) { + // Return if the error is not because the directory already exists + return nil, err + } + } + } + pr, pw := io.Pipe() + errChan := make(chan error, 1) + go func(errChan chan error) { + err := client.StorFrom(f.Path(), pr, uint64(f.offset)) + errChan <- err + // close the pipe reader so that writes to the dataconn aren't blocking. + // error will occur when pipereader is already closed - nothing to do in that case. + _ = pr.Close() + }(errChan) + + return &dataConn{ + mode: types.OpenWrite, + R: pr, + W: pw, + errChan: errChan, + }, nil +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/ftp/doc.go b/vendor/github.com/c2fo/vfs/v6/backend/ftp/doc.go new file mode 100644 index 000000000..1aef9eadb --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/ftp/doc.go @@ -0,0 +1,152 @@ +/* +Package ftp - FTP VFS implementation. + +# Usage + +Rely on github.com/c2fo/vfs/v6/backend + + import( + "github.com/c2fo/vfs/v6/backend" + "github.com/c2fo/vfs/v6/backend/ftp" + ) + + func UseFs() error { + fs := backend.Backend(ftp.Scheme) + ... + } + +Or call directly: + + import "github.com/c2fo/vfs/v6/backend/ftp" + + func DoSomething() { + fs := ftp.NewFilesystem() + + location, err := fs.NewLocation("myuser@server.com:21", "/some/path/") + if err != nil { + #handle error + } + ... + } + +ftp can be augmented with some implementation-specific methods. Backend returns vfs.Filesystem interface so it +would have to be cast as ftp.Filesystem to use them. 
+
+These methods are chainable:
+(*FileSystem) WithClient(client interface{}) *FileSystem
+(*FileSystem) WithOptions(opts vfs.Options) *FileSystem
+
+    func DoSomething() {
+
+        // cast if fs was created using backend.Backend(). Not necessary if created directly from ftp.NewFileSystem().
+        fs := backend.Backend(ftp.Scheme)
+        fs = fs.(*ftp.FileSystem)
+
+        // to pass specific client implementing types.Client interface (in this case, _ftp github.com/jlaffaye/ftp)
+        client, _ := _ftp.Dial("server.com:21")
+        fs = fs.WithClient(client)
+
+        // to pass in client options. See Options for more info. Note that changes to Options will make nil any client.
+        // This behavior ensures that changes to settings will get applied to a newly created client.
+        fs = fs.WithOptions(
+            ftp.Options{
+                Password: "s3cr3t",
+                DisableEPSV: true,
+                Protocol: ftp.ProtocolFTPES,
+                DialTimeout: 15 * time.Second,
+                DebugWriter: os.Stdout,
+            },
+        )
+
+        location, err := fs.NewLocation("myuser@server.com:21", "/some/path/")
+        #handle error
+
+        file, err := location.NewFile("myfile.txt")
+        #handle error
+
+        _, err = file.Write([]byte("some text"))
+        #handle error
+
+        err = file.Close()
+        #handle error
+
+    }
+
+Note - this vfs implementation can have issues conducting simultaneous reads and writes on files created from the same filesystem. This can
+cause issues when attempting to use those files with functions such as io.CopyBuffer.
+
+The provided CopyToFile and CopyToLocation functions should be used instead in these instances.
+
+    // initialize a location using bob@ftp.acme.com
+    _ = os.Setenv("VFS_FTP_PASSWORD", "somepass")
+    someLocation, _ := vfssimple.NewLocation("ftp://bob@ftp.acme.com/some/path/")
+
+    // open some existing file
+    oldFile, _ := someLocation.NewFile("someExistingFile.txt")
+
+    // open some new file using same filesystem (same auth/host, same client connection)
+    newFile, _ := someLocation.NewFile("someNonExistentFile.txt")
+
+    // can't read and write simultaneously from the same client connection - will result in
+    // an error
+    written, err := io.Copy(newFile, oldFile)
+
+    // CopyToFile/CopyToLocation, however, will work as expected because we copy to an
+    // intermediate local file, thereby making the Read / Write to the remote files sequential.
+    // MoveToFile/MoveToLocation are unaffected since they are essentially just an FTP "RENAME".
+    err := oldFile.CopyToFile(newFile)
+
+# Authentication
+
+Authentication, by default, occurs automatically when Client() is called. Since user is part of the URI authority section
+(Volume), auth is handled slightly differently than other vfs backends (except SFTP).
+
+A client is initialized lazily, meaning we only make a connection to the server at the last moment, so we are free to modify
+options until then. The authenticated session is closed any time WithOption() or WithClient() occurs.
+
+## USERNAME
+
+User may only be set in the URI authority section (Volume in vfs parlance).
+
+      scheme             host
+      __/             ___/____  port
+     /  \           /        \ /\
+     ftp://someuser@server.com:21/path/to/file.txt
+           \____________________/ \______________/
+           \______/        \               \
+               /     authority section    path
+          username       (Volume)
+
+ftp vfs backend defaults to "anonymous" if no username is provided in the authority, ie "ftp://service.com/".
+
+## PASSWORD
+
+Passwords may be passed via Options.Password or via the environment variable *VFS_FTP_PASSWORD*. If no password is provided,
+default is "anonymous".
Password precedence is default, env var, Options.Password, such that env var, if set, overrides default,
+and Options.Password, if set, overrides env var.
+
+# Protocol
+
+The ftp backend supports the following FTP protocols: FTP (unencrypted), FTPS (implicit TLS), and FTPES (explicit TLS). Protocol can be set
+by env var *VFS_FTP_PROTOCOL* or in Options.Protocol. Options values take precedence over env vars.
+
+By default, FTPS and FTPES will use the following TLS configuration, but it can be overridden (recommended) with Options.TLSConfig:
+
+    tlsConfig := &tls.Config{
+        MinVersion:         tls.VersionTLS12,
+        InsecureSkipVerify: true,
+        ClientSessionCache: tls.NewLRUClientSessionCache(0),
+        ServerName:         hostname,
+    }
+
+See https://pkg.go.dev/crypto/tls#Config for all TLS configuration options.
+
+# Other Options
+
+DebugWriter *io.Writer* - captures FTP command details to any writer.
+
+DialTimeout *time.Duration* - sets timeout for connecting only.
+
+DisableEPSV *bool* - Extended Passive mode (EPSV) is attempted by default. Set to true to use regular Passive mode (PASV).
+*/
+package ftp
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/ftp/errors.go b/vendor/github.com/c2fo/vfs/v6/backend/ftp/errors.go
new file mode 100644
index 000000000..e0a00de83
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/ftp/errors.go
@@ -0,0 +1,9 @@
+package ftp
+
+type dataConnErr string
+
+func (e dataConnErr) Error() string { return string(e) }
+
+const singleOpInvalidDataconnType = dataConnErr("dataconn must be open for single op mode to conduct a single op action")
+const readInvalidDataconnType = dataConnErr("dataconn must be open for read mode to conduct a read")
+const writeInvalidDataconnType = dataConnErr("dataconn must be open for write mode to conduct a write")
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/ftp/file.go b/vendor/github.com/c2fo/vfs/v6/backend/ftp/file.go
new file mode 100644
index 000000000..45b69377b
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/ftp/file.go
@@ -0,0 +1,456 @@
+package ftp
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "io"
+    "os"
+    "path"
+    "strconv"
+    "strings"
+    "time"
+
+    _ftp "github.com/jlaffaye/ftp"
+
+    "github.com/c2fo/vfs/v6"
+    "github.com/c2fo/vfs/v6/backend"
+    "github.com/c2fo/vfs/v6/backend/ftp/types"
+    "github.com/c2fo/vfs/v6/options"
+    "github.com/c2fo/vfs/v6/utils"
+)
+
+var tempFileNameGetter func(string) string
+var now = time.Now
+
+func init() {
+    // this func is overridable for tests
+    tempFileNameGetter = getTempFilename
+}
+
+// File implements vfs.File interface for FTP fs.
+type File struct {
+    fileSystem *FileSystem
+    authority  utils.Authority
+    path       string
+    offset     int64
+}
+
+// Info Functions
+
+// LastModified returns the LastModified property of ftp file.
+func (f *File) LastModified() (*time.Time, error) {
+
+    entry, err := f.stat(context.TODO())
+    if err != nil {
+        return nil, err
+    }
+    t := entry.Time
+    return &t, nil
+}
+
+func (f *File) stat(ctx context.Context) (*_ftp.Entry, error) {
+    dc, err := f.fileSystem.DataConn(ctx, f.authority, types.SingleOp, f)
+    if err != nil {
+        return nil, err
+    }
+    // check if the MLSD command is available - if so we'll want to grab file info
+    // via MLST. Otherwise we'll need to use LIST.
+ if dc.IsTimePreciseInList() { + entry, err := dc.GetEntry(f.Path()) + if err != nil { + if strings.HasPrefix(err.Error(), fmt.Sprintf("%d", _ftp.StatusFileUnavailable)) { + return nil, os.ErrNotExist + } + return nil, err + } + return entry, nil + } else { + entries, err := dc.List(f.Path()) + if err != nil { + if strings.HasPrefix(err.Error(), fmt.Sprintf("%d", _ftp.StatusFileUnavailable)) { + return nil, os.ErrNotExist + } + return nil, err + } + if len(entries) == 0 { + return nil, os.ErrNotExist + } + return entries[0], nil + } +} + +// Name returns the path portion of the file's path property. IE: "file.txt" of "ftp://someuser@host.com/some/path/to/file.txt +func (f *File) Name() string { + return path.Base(f.path) +} + +// Path return the directory portion of the file's path. IE: "/path/to/" of "ftp://someuser@host.com/some/path/to/file.txt +func (f *File) Path() string { + return utils.EnsureLeadingSlash(f.path) +} + +// Exists returns a boolean of whether or not the file exists on the ftp server +func (f *File) Exists() (bool, error) { + _, err := f.stat(context.TODO()) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + // file does not exist + return false, nil + } + // error calling stat + return false, err + } + + // file exists + return true, nil +} + +// Touch creates a zero-length file on the vfs.File if no File exists. Update File's last modified timestamp. +// Returns error if unable to touch File. +func (f *File) Touch() error { + exists, err := f.Exists() + if err != nil { + return err + } + + if !exists { + _, err := f.Write([]byte{}) + if err != nil { + return err + } + return f.Close() + } + + // if a set time function is available use that to set last modified to now + dc, err := f.fileSystem.DataConn(context.TODO(), f.authority, types.SingleOp, f) + if err != nil { + return err + } + if dc.IsSetTimeSupported() { + return dc.SetTime(f.path, time.Now()) + } + + // doing move and move back to ensure last modified is updated + newFile, err := f.Location().NewFile(tempFileNameGetter(f.Name())) + if err != nil { + return err + } + + err = f.MoveToFile(newFile) + if err != nil { + return err + } + + return newFile.MoveToFile(f) +} + +func getTempFilename(origName string) string { + return origName + strconv.FormatInt(now().UnixNano(), 10) +} + +// Size returns the size of the remote file. +func (f *File) Size() (uint64, error) { + + entry, err := f.stat(context.TODO()) + if err != nil { + return 0, err + } + return entry.Size, nil +} + +// Location returns a vfs.Location at the location of the file. IE: if file is at +// ftp://someuser@host.com/here/is/the/file.txt the location points to ftp://someuser@host.com/here/is/the/ +func (f *File) Location() vfs.Location { + return &Location{ + fileSystem: f.fileSystem, + path: path.Dir(f.path), + Authority: f.authority, + } +} + +// Move/Copy Operations + +// MoveToFile puts the contents of File into the targetFile passed using File.CopyToFile. +// If the copy succeeds, the source file is deleted. Any errors from the copy or delete are +// returned. +// If the given location is also ftp AND for the same user and host, the ftp Rename method is used, otherwise +// we'll do a an io.Copy to the destination file then delete source file. 
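+//
+// Sketch of the same-authority fast path (authority and paths hypothetical):
+//
+//	src, _ := fs.NewFile("user@host:21", "/dir/a.txt")
+//	dst, _ := fs.NewFile("user@host:21", "/other/a.txt")
+//	err := src.MoveToFile(dst) // server-side RENAME; no data transfer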
+func (f *File) MoveToFile(t vfs.File) error { + // ftp rename if vfs is ftp and for the same user/host + if f.fileSystem.Scheme() == t.Location().FileSystem().Scheme() && + f.authority.UserInfo().Username() == t.(*File).authority.UserInfo().Username() && + f.authority.HostPortStr() == t.(*File).authority.HostPortStr() { + + // ensure destination exists before moving + exists, err := t.Location().Exists() + if err != nil { + return err + } + dc, err := f.fileSystem.DataConn(context.TODO(), f.authority, types.SingleOp, f) + if err != nil { + return err + } + if !exists { + // it doesn't matter which client we use since they are effectively the same + err = dc.MakeDir(t.Location().Path()) + if err != nil { + return fmt.Errorf("failed to create directory: %w", err) + } + } + return dc.Rename(f.Path(), t.Path()) + } + + // otherwise do copy-delete + if err := f.CopyToFile(t); err != nil { + return err + } + return f.Delete() +} + +// MoveToLocation works by creating a new file on the target location then calling MoveToFile() on it. +func (f *File) MoveToLocation(location vfs.Location) (vfs.File, error) { + newFile, err := location.NewFile(f.Name()) + if err != nil { + return nil, err + } + + err = f.MoveToFile(newFile) + if err != nil { + return nil, err + } + return newFile, nil +} + +// CopyToFile puts the contents of File into the targetFile passed. +func (f *File) CopyToFile(file vfs.File) (err error) { //nolint:gocyclo + // Close file (f) reader regardless of an error + defer func() { + // close writer + wErr := file.Close() + // close reader + rErr := f.Close() + // + if err == nil { + if wErr != nil { + err = wErr + } else if rErr != nil { + err = rErr + } + } + }() + + if err := backend.ValidateCopySeekPosition(f); err != nil { + return err + } + + if f.fileSystem.Scheme() == file.Location().FileSystem().Scheme() && + f.authority.UserInfo().Username() == file.(*File).authority.UserInfo().Username() && + f.authority.HostPortStr() == file.(*File).authority.HostPortStr() { + // in the case that both files have the same authority we'll copy by writing a temporary + // file to mem and then writing it back to the ftp server + tempFile, err := f.createLocalTempFile() + if err != nil { + return err + } + defer func() { + _ = os.Remove(tempFile.Name()) + }() + if err := utils.TouchCopyBuffered(tempFile, f, 0); err != nil { + return err + } + // validate seek is at 0,0 before doing copy + _, err = tempFile.Seek(0, io.SeekStart) + if err != nil { + return fmt.Errorf("failed to determine current cursor offset: %w", err) + } + + if err := f.Close(); err != nil { + return err + } + if err := utils.TouchCopyBuffered(file, tempFile, 0); err != nil { + return err + } + if err := tempFile.Close(); err != nil { + return err + } + + return nil + } else { + if err := utils.TouchCopyBuffered(file, f, 0); err != nil { + return err + } + // Close target to flush and ensure that cursor isn't at the end of the file when the caller reopens for read + if cerr := file.Close(); cerr != nil { + return cerr + } + + return err + } + +} + +// CopyToLocation creates a copy of *File, using the file's current path as the new file's +// path at the given location. +func (f *File) CopyToLocation(location vfs.Location) (vfs.File, error) { + newFile, err := location.NewFile(f.Name()) + if err != nil { + return nil, err + } + + return newFile, f.CopyToFile(newFile) +} + +// CRUD Operations + +// Delete removes the remote file. Error is returned, if any. 
+func (f *File) Delete(_ ...options.DeleteOption) error { + dc, err := f.fileSystem.DataConn(context.TODO(), f.authority, types.SingleOp, f) + if err != nil { + return err + } + return dc.Delete(f.Path()) +} + +// Close calls the underlying ftp.Response Close, if opened, and clears the internal pointer +func (f *File) Close() error { + if f.fileSystem.dataconn != nil { + err := f.fileSystem.dataconn.Close() + if err != nil { + return utils.WrapCloseError(err) + } + f.fileSystem.resetConn = true + } + // no op for unopened file + f.offset = 0 + return nil +} + +// Read calls the underlying ftp.File Read. +func (f *File) Read(p []byte) (n int, err error) { + dc, err := f.fileSystem.DataConn(context.TODO(), f.authority, types.OpenRead, f) + if err != nil { + return 0, utils.WrapReadError(err) + } + + read, err := dc.Read(p) + if err != nil { + // if we got io.EOF, we'll return the read and the EOF error + // because io.Copy looks for EOF to determine if it's done + // and doesn't support error wrapping + if errors.Is(err, io.EOF) { + return read, io.EOF + } + return read, utils.WrapReadError(err) + } + + f.offset += int64(read) + + return read, nil +} + +// Seek calls the underlying ftp.File Seek. +func (f *File) Seek(offset int64, whence int) (int64, error) { + // ensure file exists before seeking + exists, err := f.Exists() + if err != nil { + return 0, utils.WrapSeekError(err) + } + if !exists { + return 0, utils.WrapSeekError(os.ErrNotExist) + } + + mode := types.OpenRead + // no file open yet - assume read (will get reset to write on a subsequent write) + if f.fileSystem.dataconn == nil { + f.offset = offset + } else { + mode = f.fileSystem.dataconn.Mode() + + switch whence { + case 0: // offset from the beginning of the file (position 0) + f.offset = offset + case 1: // offset relative to current position + if f.offset < 0 { + f.offset = 0 + } + f.offset += offset + + // close dataconn so that it reset the offset on next reopen (in StorFrom or RetrFrom) + err := f.fileSystem.dataconn.Close() + if err != nil { + return 0, utils.WrapSeekError(err) + } + f.fileSystem.resetConn = true + case 2: // offset from end of the file + sz, err := f.Size() + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + return 0, utils.WrapSeekError(err) + } + // file doesn't exist, just use 0 as offset + f.offset = 0 + } else { + f.offset = int64(sz) - offset + if f.offset < 0 { + f.offset = 0 + } + } + + // close dataconn so that it reset the offset on next reopen (in StorFrom or RetrFrom) + err = f.fileSystem.dataconn.Close() + if err != nil { + return 0, utils.WrapSeekError(err) + } + f.fileSystem.resetConn = true + } + } + + // now that f.offset has been adjusted and mode was captured, reinitialize file + _, err = f.fileSystem.DataConn(context.TODO(), f.authority, mode, f) + if err != nil { + return 0, utils.WrapSeekError(err) + } + + // return new offset from beginning of file + return f.offset, nil +} + +// Write calls the underlying ftp.File Write. +func (f *File) Write(data []byte) (res int, err error) { + dc, err := f.fileSystem.DataConn(context.TODO(), f.authority, types.OpenWrite, f) + if err != nil { + return 0, utils.WrapWriteError(err) + } + + b, err := dc.Write(data) + if err != nil { + return 0, utils.WrapWriteError(err) + } + + offset := int64(b) + f.offset += offset + + return b, nil +} + +// URI returns the File's URI as a string. 
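+//
+// e.g. (illustrative) a file at "/some/file.txt" for user "bob" on host
+// "ftp.example.com:21" yields "ftp://bob@ftp.example.com:21/some/file.txt".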
+func (f *File) URI() string { + loc := f.Location().(*Location) + return utils.EncodeURI( + f.fileSystem.Scheme(), + loc.Authority.UserInfo().Username(), + loc.Authority.HostPortStr(), + f.Path(), + ) +} + +// String implement fmt.Stringer, returning the file's URI as the default string. +func (f *File) String() string { + return f.URI() +} + +func (f *File) createLocalTempFile() (*os.File, error) { + // Create temp file + return os.CreateTemp("", fmt.Sprintf("%s.%d", f.Name(), time.Now().UnixNano())) +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/ftp/fileSystem.go b/vendor/github.com/c2fo/vfs/v6/backend/ftp/fileSystem.go new file mode 100644 index 000000000..2511725a3 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/ftp/fileSystem.go @@ -0,0 +1,151 @@ +package ftp + +import ( + "context" + "errors" + "fmt" + "path" + + "github.com/c2fo/vfs/v6" + "github.com/c2fo/vfs/v6/backend" + "github.com/c2fo/vfs/v6/backend/ftp/types" + "github.com/c2fo/vfs/v6/utils" +) + +// Scheme defines the filesystem type. +const Scheme = "ftp" +const name = "File Transfer Protocol" + +var dataConnGetterFunc func(context.Context, utils.Authority, *FileSystem, *File, types.OpenType) (types.DataConn, error) +var defaultClientGetter func(context.Context, utils.Authority, Options) (client types.Client, err error) + +// FileSystem implements vfs.Filesystem for the FTP filesystem. +type FileSystem struct { + options vfs.Options + ftpclient types.Client + dataconn types.DataConn + resetConn bool +} + +// Retry will return the default no-op retrier. The FTP client provides its own retryer interface, and is available +// to override via the ftp.FileSystem Options type. +func (fs *FileSystem) Retry() vfs.Retry { + return vfs.DefaultRetryer() +} + +// NewFile function returns the FTP implementation of vfs.File. +func (fs *FileSystem) NewFile(authority, filePath string) (vfs.File, error) { + if fs == nil { + return nil, errors.New("non-nil ftp.FileSystem pointer is required") + } + if filePath == "" { + return nil, errors.New("non-empty string for path is required") + } + if err := utils.ValidateAbsoluteFilePath(filePath); err != nil { + return nil, err + } + + auth, err := utils.NewAuthority(authority) + if err != nil { + return nil, err + } + + return &File{ + fileSystem: fs, + authority: auth, + path: path.Clean(filePath), + }, nil +} + +// NewLocation function returns the FTP implementation of vfs.Location. 
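+//
+// Sketch (authority and path hypothetical):
+//
+//	loc, err := fs.NewLocation("user@ftp.example.com:21", "/pub/")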
+func (fs *FileSystem) NewLocation(authority, locPath string) (vfs.Location, error) {
+    if fs == nil {
+        return nil, errors.New("non-nil ftp.FileSystem pointer is required")
+    }
+    if err := utils.ValidateAbsoluteLocationPath(locPath); err != nil {
+        return nil, err
+    }
+
+    auth, err := utils.NewAuthority(authority)
+    if err != nil {
+        return nil, err
+    }
+
+    return &Location{
+        fileSystem: fs,
+        path:       utils.EnsureTrailingSlash(path.Clean(locPath)),
+        Authority:  auth,
+    }, nil
+}
+
+// Name returns "File Transfer Protocol"
+func (fs *FileSystem) Name() string {
+    return name
+}
+
+// Scheme returns "ftp" as the initial part of a file URI ie: ftp://
+func (fs *FileSystem) Scheme() string {
+    return Scheme
+}
+
+// DataConn returns the underlying ftp data connection, creating it, if necessary.
+// See Overview for authentication resolution.
+func (fs *FileSystem) DataConn(ctx context.Context, authority utils.Authority, t types.OpenType, f *File) (types.DataConn, error) {
+    if t != types.SingleOp && f == nil {
+        return nil, errors.New("can not create DataConn for read or write for a nil file")
+    }
+    return dataConnGetterFunc(ctx, authority, fs, f, t)
+}
+
+// Client returns the underlying ftp client, creating it, if necessary.
+// See Overview for authentication resolution.
+func (fs *FileSystem) Client(ctx context.Context, authority utils.Authority) (types.Client, error) {
+    if fs.ftpclient == nil {
+        if fs.options == nil {
+            fs.options = Options{}
+        }
+
+        if opts, ok := fs.options.(Options); ok {
+            var err error
+            fs.ftpclient, err = defaultClientGetter(ctx, authority, opts)
+            if err != nil {
+                return nil, err
+            }
+        } else {
+            return nil, fmt.Errorf("unable to create client, vfs.Options must be an ftp.Options")
+        }
+    }
+    return fs.ftpclient, nil
+}
+
+// WithOptions sets options for client and returns the filesystem (chainable)
+func (fs *FileSystem) WithOptions(opts vfs.Options) *FileSystem {
+
+    // only set options if vfs.Options is ftp.Options
+    if opts, ok := opts.(Options); ok {
+        fs.options = opts
+        // we set client to nil to ensure that a new client is created using the new context when Client() is called
+        fs.ftpclient = nil
+    }
+    return fs
+}
+
+// WithClient passes in an ftp client and returns the filesystem (chainable)
+func (fs *FileSystem) WithClient(client types.Client) *FileSystem {
+    fs.ftpclient = client
+    fs.options = nil
+
+    return fs
+}
+
+// NewFileSystem initializer for fileSystem struct.
+func NewFileSystem() *FileSystem {
+    return &FileSystem{}
+}
+
+func init() {
+    defaultClientGetter = getClient
+    dataConnGetterFunc = getDataConn
+    // registers a default Filesystem
+    backend.Register(Scheme, NewFileSystem())
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/ftp/location.go b/vendor/github.com/c2fo/vfs/v6/backend/ftp/location.go
new file mode 100644
index 000000000..572714dc9
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/ftp/location.go
@@ -0,0 +1,232 @@
+package ftp
+
+import (
+    "context"
+    "fmt"
+    "path"
+    "regexp"
+    "strings"
+
+    _ftp "github.com/jlaffaye/ftp"
+
+    "github.com/c2fo/vfs/v6"
+    "github.com/c2fo/vfs/v6/backend/ftp/types"
+    "github.com/c2fo/vfs/v6/options"
+    "github.com/c2fo/vfs/v6/utils"
+)
+
+// Location implements the vfs.Location interface specific to ftp fs.
+type Location struct {
+    fileSystem *FileSystem
+    path       string
+    Authority  utils.Authority
+}
+
+// List calls FTP ReadDir to list all files in the location's path.
+// If you have many thousands of files at the given location, this could become quite expensive.
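+//
+// Example (names illustrative); note that only file entries are returned,
+// not subdirectories:
+//
+//	names, err := loc.List()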
+func (l *Location) List() ([]string, error) { + var filenames []string + dc, err := l.fileSystem.DataConn(context.TODO(), l.Authority, types.SingleOp, nil) + if err != nil { + return filenames, err + } + + entries, err := dc.List(l.Path()) + if err != nil { + if strings.HasPrefix(err.Error(), fmt.Sprintf("%d", _ftp.StatusFileUnavailable)) { + // in this case the directory does not exist + return filenames, nil + } + return filenames, err + } + for _, entry := range entries { + if entry.Type == _ftp.EntryTypeFile { + filenames = append(filenames, entry.Name) + } + } + + return filenames, nil +} + +// ListByPrefix calls FTP ReadDir with the location's path modified relatively by the prefix arg passed to the function. +// - Returns ([]string{}, nil) in the case of a non-existent directory/prefix/location. +// - "relative" prefixes are allowed, ie, listByPrefix from "/some/path/" with prefix "to/somepattern" is the same as +// location "/some/path/to/" with prefix of "somepattern" +// - If the user cares about the distinction between an empty location and a non-existent one, Location.Exists() should +// be checked first. +func (l *Location) ListByPrefix(prefix string) ([]string, error) { + var filenames = make([]string, 0) + + // validate prefix + if err := utils.ValidatePrefix(prefix); err != nil { + return filenames, err + } + + // get absolute prefix path (in case prefix contains relative prefix, ie, some/path/to/myprefix) + fullpath := path.Join(l.Path(), prefix) + + // get prefix and location path after any relative pathing is resolved + // For example, given: + // loc, _ := fs.NewLocation("user@host:21", "/some/path/") + // loc.ListByPrefix("subdir/prefix") + // the location fullpath should resolve to be "/some/path/subdir/" while the prefix would be "prefix". + baseprefix := "" + if prefix == "." { + // for prefix of ".", it is necessary to manually set baseprefix as "." and + // add trailing slash since path.Join thinks that "." is a directory + baseprefix = prefix + fullpath = utils.EnsureTrailingSlash(fullpath) + } else { + // get baseprefix fix from the fullpath + baseprefix = path.Base(fullpath) + // get absolute dir path of fullpath + fullpath = utils.EnsureTrailingSlash(path.Dir(fullpath)) + } + + // get dataconn + dc, err := l.fileSystem.DataConn(context.TODO(), l.Authority, types.SingleOp, nil) + if err != nil { + return filenames, err + } + + // list directory entries + entries, err := dc.List(fullpath) + if err != nil { + // fullpath does not exist, is not an error here + if strings.HasPrefix(err.Error(), fmt.Sprintf("%d", _ftp.StatusFileUnavailable)) { + // in this case the directory does not exist + return []string{}, nil + } + return filenames, err + } + + for _, entry := range entries { + // find entries that match prefix and are files + if entry.Type == _ftp.EntryTypeFile && strings.HasPrefix(entry.Name, baseprefix) { + filenames = append(filenames, entry.Name) + } + } + + return filenames, nil +} + +// ListByRegex retrieves the filenames of all the files at the location's current path, then filters out all those +// that don't match the given regex. The resource considerations of List() apply here as well. 
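+// For example (editor's sketch; the pattern is hypothetical):
+//
+//	csvNames, err := loc.ListByRegex(regexp.MustCompile(`\.csv$`))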
+func (l *Location) ListByRegex(regex *regexp.Regexp) ([]string, error) { + filenames, err := l.List() + if err != nil { + return nil, err + } + + var filteredFilenames []string + for _, filename := range filenames { + if regex.MatchString(filename) { + filteredFilenames = append(filteredFilenames, filename) + } + } + return filteredFilenames, nil +} + +// Volume returns the Authority the location is contained in. +func (l *Location) Volume() string { + return l.Authority.String() +} + +// Path returns the path the location references in most FTP calls. +func (l *Location) Path() string { + return utils.EnsureLeadingSlash(utils.EnsureTrailingSlash(l.path)) +} + +// Exists returns true if the remote FTP directory exists. +func (l *Location) Exists() (bool, error) { + dc, err := l.fileSystem.DataConn(context.TODO(), l.Authority, types.SingleOp, nil) + if err != nil { + return false, err + } + + locBasename := path.Base(l.Path()) + // get parent directory by removing the last part of the path + parentDir := strings.TrimSuffix(l.Path(), locBasename+"/") + + entries, err := dc.List(parentDir) + if err != nil { + if strings.HasPrefix(err.Error(), fmt.Sprintf("%d", _ftp.StatusFileUnavailable)) { + // in this case the directory does not exist + return false, nil + } + return false, err + } + + for i := range entries { + if entries[i].Name == locBasename && entries[i].Type == _ftp.EntryTypeFolder { + return true, nil + } + } + + return false, nil +} + +// NewLocation makes a copy of the underlying Location, then modifies its path by calling ChangeDir with the +// relativePath argument, returning the resulting location. The only possible errors come from the call to +// ChangeDir, which, for the FTP implementation doesn't ever result in an error. +func (l *Location) NewLocation(relativePath string) (vfs.Location, error) { + // make a copy of the original location first, then ChangeDir, leaving the original location as-is + newLocation := &Location{} + *newLocation = *l + err := newLocation.ChangeDir(relativePath) + if err != nil { + return nil, err + } + return newLocation, nil +} + +// ChangeDir takes a relative path, and modifies the underlying Location's path. The caller is modified by this +// so the only return is any error. For this implementation there are no errors. +func (l *Location) ChangeDir(relativePath string) error { + err := utils.ValidateRelativeLocationPath(relativePath) + if err != nil { + return err + } + l.path = utils.EnsureLeadingSlash(utils.EnsureTrailingSlash(path.Join(l.path, relativePath))) + return nil +} + +// NewFile uses the properties of the calling location to generate a vfs.File (backed by an ftp.File). The filePath +// argument is expected to be a relative path to the location's current path. +func (l *Location) NewFile(filePath string) (vfs.File, error) { + err := utils.ValidateRelativeFilePath(filePath) + if err != nil { + return nil, err + } + newFile := &File{ + fileSystem: l.fileSystem, + authority: l.Authority, + path: utils.EnsureLeadingSlash(path.Join(l.path, filePath)), + } + return newFile, nil +} + +// DeleteFile removes the file at fileName path. +func (l *Location) DeleteFile(fileName string, _ ...options.DeleteOption) error { + file, err := l.NewFile(fileName) + if err != nil { + return err + } + + return file.Delete() +} + +// FileSystem returns a vfs.fileSystem interface of the location's underlying fileSystem. +func (l *Location) FileSystem() vfs.FileSystem { + return l.fileSystem +} + +// URI returns the Location's URI as a string. 
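+// For instance (hypothetical values), a location with authority "user@host:21" and
+// path "/some/path/" yields a URI along the lines of "ftp://user@host:21/some/path/".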
+func (l *Location) URI() string {
+	return utils.EncodeURI(l.FileSystem().Scheme(), l.Authority.UserInfo().Username(), l.Authority.HostPortStr(), l.Path())
+}
+
+// String implements fmt.Stringer, returning the location's URI as the default string.
+func (l *Location) String() string {
+	return l.URI()
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/ftp/options.go b/vendor/github.com/c2fo/vfs/v6/backend/ftp/options.go
new file mode 100644
index 000000000..947f7db18
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/ftp/options.go
@@ -0,0 +1,188 @@
+package ftp
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"time"
+
+	_ftp "github.com/jlaffaye/ftp"
+
+	"github.com/c2fo/vfs/v6/backend/ftp/types"
+	"github.com/c2fo/vfs/v6/utils"
+)
+
+// Options struct implements the vfs.Options interface, providing optional parameters for creating an ftp filesystem.
+type Options struct {
+	Password    string // env var VFS_FTP_PASSWORD
+	Protocol    string // env var VFS_FTP_PROTOCOL
+	DisableEPSV *bool  // env var VFS_FTP_DISABLE_EPSV
+	DebugWriter io.Writer
+	TLSConfig   *tls.Config
+	DialTimeout time.Duration
+}
+
+const (
+	// ProtocolFTP signifies plain, unencrypted FTP
+	ProtocolFTP = "FTP"
+	// ProtocolFTPS signifies FTP over implicit TLS
+	ProtocolFTPS = "FTPS"
+	// ProtocolFTPES signifies FTP over explicit TLS
+	ProtocolFTPES = "FTPES"
+
+	defaultUsername        = "anonymous"
+	defaultPassword        = "anonymous"
+	defaultPort     uint16 = 21
+
+	envDisableEPSV = "VFS_FTP_DISABLE_EPSV"
+	envProtocol    = "VFS_FTP_PROTOCOL"
+	envPassword    = "VFS_FTP_PASSWORD" //nolint:gosec
+)
+
+func getClient(ctx context.Context, authority utils.Authority, opts Options) (types.Client, error) {
+	// dial connection
+	c, err := _ftp.Dial(fetchHostPortString(authority), fetchDialOptions(ctx, authority, opts)...)
+ if err != nil { + return nil, err + } + + // login + err = c.Login(fetchUsername(authority), fetchPassword(opts)) + if err != nil { + return nil, err + } + + return c, nil +} + +func fetchUsername(auth utils.Authority) string { + // set default username + username := defaultUsername + + // override with authority, if any + if auth.UserInfo().Username() != "" { + username = auth.UserInfo().Username() + } + + return username +} + +// note: since the format "user:pass" in the authority userinfo field is deprecated (per https://tools.ietf.org/html/rfc3986#section-3.2.1) +// it is not used by fetchPassword and should never be included in a vfs URI +func fetchPassword(opts Options) string { + // set default password + password := defaultPassword + + // override with env var, if any + if _, ok := os.LookupEnv(envPassword); ok { + password = os.Getenv(envPassword) + } + + // override with options, if any + if opts.Password != "" { + password = opts.Password + } + + return password +} + +func fetchHostPortString(auth utils.Authority) string { + // get host + host := auth.Host() + + // get port + port := defaultPort + if auth.Port() > 0 { + port = auth.Port() + } + + // return : string + return fmt.Sprintf("%s:%d", host, port) +} + +func fetchDialOptions(ctx context.Context, auth utils.Authority, opts Options) []_ftp.DialOption { + // set context DialOption + dialOptions := []_ftp.DialOption{ + _ftp.DialWithContext(ctx), + } + + // determine DisableEPSV DialOption + dialOptions = append(dialOptions, _ftp.DialWithDisabledEPSV(isDisableOption(opts))) + + // determine protocol-specific (FTPS/FTPeS) TLS DialOption, if any (defaults to plain FTP, no TLS) + switch protocol := fetchProtocol(opts); { + case strings.EqualFold(protocol, ProtocolFTPS): + dialOptions = append(dialOptions, _ftp.DialWithTLS(fetchTLSConfig(auth, opts))) + case strings.EqualFold(protocol, ProtocolFTPES): + dialOptions = append(dialOptions, _ftp.DialWithExplicitTLS(fetchTLSConfig(auth, opts))) + } + + // determine debug writer DialOption, if any + if opts.DebugWriter != nil { + dialOptions = append(dialOptions, _ftp.DialWithDebugOutput(opts.DebugWriter)) + } + + // determine dial timeout DialOption + if opts.DialTimeout.Seconds() > 0 { + dialOptions = append(dialOptions, _ftp.DialWithTimeout(opts.DialTimeout)) + } + + return dialOptions +} + +func isDisableOption(opts Options) bool { + // default to false, meaning EPSV stays enabled + disableEpsv := false + + // override with env var, if any + if _, ok := os.LookupEnv(envDisableEPSV); ok { + setting := os.Getenv(envDisableEPSV) + if strings.EqualFold(setting, "true") || setting == "1" { + disableEpsv = true + } + } + + // override with Options, if any + if opts.DisableEPSV != nil { + disableEpsv = *opts.DisableEPSV + } + + return disableEpsv +} + +func fetchTLSConfig(auth utils.Authority, opts Options) *tls.Config { + // setup basic TLS config for host + tlsConfig := &tls.Config{ + MinVersion: tls.VersionTLS12, + InsecureSkipVerify: true, //nolint:gosec + ClientSessionCache: tls.NewLRUClientSessionCache(0), + ServerName: auth.Host(), + } + + // override with Options, if any + if opts.TLSConfig != nil { + tlsConfig = opts.TLSConfig + } + + return tlsConfig +} + +func fetchProtocol(opts Options) string { + // set default protocol + protocol := ProtocolFTP + + // override with env var + if _, ok := os.LookupEnv(envProtocol); ok { + protocol = os.Getenv(envProtocol) + } + + // override with options value + if opts.Protocol != "" { + protocol = opts.Protocol + } + + return protocol +} 
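+
+// Resolution precedence used by the fetch* helpers above (editor's summary):
+// built-in defaults (anonymous/anonymous, port 21, plain FTP, EPSV enabled) are
+// overridden by the VFS_FTP_* environment variables, which are in turn overridden
+// by explicit ftp.Options fields. A sketch with hypothetical values:
+//
+//	disableEPSV := true
+//	fs := NewFileSystem().WithOptions(Options{
+//		Password:    "s3cret",      // wins over VFS_FTP_PASSWORD
+//		Protocol:    ProtocolFTPES, // explicit TLS
+//		DisableEPSV: &disableEPSV,
+//	})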
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/ftp/types/types.go b/vendor/github.com/c2fo/vfs/v6/backend/ftp/types/types.go
new file mode 100644
index 000000000..4f93d0444
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/ftp/types/types.go
@@ -0,0 +1,50 @@
+package types
+
+import (
+	"io"
+	"time"
+
+	_ftp "github.com/jlaffaye/ftp"
+)
+
+// OpenType represents the mode (read or write) that we open a file for.
+type OpenType int
+
+const (
+	_ OpenType = iota
+	// OpenRead denotes Read mode
+	OpenRead
+	// OpenWrite denotes Write mode
+	OpenWrite
+	SingleOp
+)
+
+// DataConn represents a data connection
+type DataConn interface {
+	Mode() OpenType
+	Delete(path string) error
+	GetEntry(p string) (*_ftp.Entry, error)
+	List(p string) ([]*_ftp.Entry, error) // NLST for just names
+	MakeDir(path string) error
+	Rename(from, to string) error
+	IsSetTimeSupported() bool
+	SetTime(path string, t time.Time) error
+	IsTimePreciseInList() bool
+	io.ReadWriteCloser
+}
+
+// Client is an interface to make it easier to test
+type Client interface {
+	Delete(path string) error
+	GetEntry(p string) (*_ftp.Entry, error)
+	List(p string) ([]*_ftp.Entry, error) // NLST for just names
+	Login(user string, password string) error
+	MakeDir(path string) error
+	Quit() error
+	Rename(from, to string) error
+	RetrFrom(path string, offset uint64) (*_ftp.Response, error)
+	StorFrom(path string, r io.Reader, offset uint64) error
+	IsSetTimeSupported() bool
+	SetTime(path string, t time.Time) error
+	IsTimePreciseInList() bool
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/gs/bucketHandleWrapper.go b/vendor/github.com/c2fo/vfs/v6/backend/gs/bucketHandleWrapper.go
new file mode 100644
index 000000000..15063b484
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/gs/bucketHandleWrapper.go
@@ -0,0 +1,83 @@
+package gs
+
+import (
+	"context"
+
+	"cloud.google.com/go/storage"
+
+	"github.com/c2fo/vfs/v6"
+)
+
+// BucketHandle is an interface which contains a subset of the functions provided
+// by storage.BucketHandle. Any function normally called directly on storage.BucketHandle
+// should be added to this interface to allow for proper retry wrapping of the functions
+// which call the GCS API.
+type BucketHandle interface {
+	Attrs(ctx context.Context) (*storage.BucketAttrs, error)
+}
+
+// BucketHandleWrapper is a unique, wrapped type which should mimic the behavior of BucketHandle, but with
+// modified return types. Each function that returns a sub type that also should be wrapped should be added
+// to this interface with the 'Wrapped' prefix.
+type BucketHandleWrapper interface {
+	BucketHandle
+	WrappedObjects(ctx context.Context, q *storage.Query) ObjectIteratorWrapper
+}
+
+// RetryBucketHandler implements the BucketHandle interface
+type RetryBucketHandler struct {
+	Retry   vfs.Retry
+	handler *storage.BucketHandle
+}
+
+// Attrs accepts a context and returns bucket attrs wrapped in a retry
+func (r *RetryBucketHandler) Attrs(ctx context.Context) (*storage.BucketAttrs, error) {
+	return bucketAttributeRetry(r.Retry, func() (*storage.BucketAttrs, error) {
+		return r.handler.Attrs(ctx)
+	})
+}
+
+// WrappedObjects returns an iterator over the objects in the bucket that match the Query q, all wrapped in a retry.
+// If q is nil, no filtering is done.
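+// A sketch (editor's example; bucket and prefix are hypothetical):
+//
+//	it := handle.WrappedObjects(ctx, &storage.Query{Prefix: "some/prefix/"})
+//	attrs, err := it.Next() // each Next() is wrapped in the configured vfs.Retry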
+func (r *RetryBucketHandler) WrappedObjects(ctx context.Context, q *storage.Query) ObjectIteratorWrapper { + return &RetryObjectIterator{Retry: r.Retry, iterator: r.handler.Objects(ctx, q)} +} + +// ObjectIteratorWrapper is an interface which contains a subset of the functions provided by storage.ObjectIterator. +type ObjectIteratorWrapper interface { + Next() (*storage.ObjectAttrs, error) +} + +// RetryObjectIterator implements the ObjectIteratorWrapper interface +type RetryObjectIterator struct { + Retry vfs.Retry + iterator *storage.ObjectIterator +} + +// Next returns the next result, wrapped in retry. Its second return value is iterator.Done if +// there are no more results. Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. +// +// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will +// have a non-empty Prefix field, and a zero value for all other fields. These +// represent prefixes. +func (r *RetryObjectIterator) Next() (*storage.ObjectAttrs, error) { + return objectAttributeRetry(r.Retry, func() (*storage.ObjectAttrs, error) { + return r.iterator.Next() + }) +} + +func bucketAttributeRetry(retry vfs.Retry, attrFunc func() (*storage.BucketAttrs, error)) (*storage.BucketAttrs, error) { + var attrs *storage.BucketAttrs + if err := retry(func() error { + var retryErr error + attrs, retryErr = attrFunc() + if retryErr != nil { + return retryErr + } + return nil + }); err != nil { + return nil, err + } + return attrs, nil +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/gs/doc.go b/vendor/github.com/c2fo/vfs/v6/backend/gs/doc.go new file mode 100644 index 000000000..657bb9787 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/gs/doc.go @@ -0,0 +1,74 @@ +/* +Package gs Google Cloud Storage VFS implementation. + +# Usage + +Rely on github.com/c2fo/vfs/backend + + import( + "github.com/c2fo/vfs/v6/backend" + "github.com/c2fo/vfs/v6/backend/gs" + ) + + func UseFs() error { + fs := backend.Backend(gs.Scheme) + ... + } + +Or call directly: + + import "github.com/c2fo/vfs/v6/backend/gs" + + func DoSomething() { + fs := gs.NewFilesystem() + ... + } + +gs can be augmented with the following implementation-specific methods. Backend returns vfs.Filesystem interface so it +would have to be cast as gs.Filesystem to use the following: + + func DoSomething() { + + ... + + // cast if fs was created using backend.Backend(). Not necessary if created directly from gs.NewFilesystem(). + fs = fs.(gs.Filesystem) + + // to use your own "context" + ctx := context.Background() + fs = fs.WithContext(ctx) + + // to pass in client options + fs = fs.WithOptions( + gs.Options{ + CredentialFile: "/root/.gcloud/account.json", + Scopes: []string{"ScopeReadOnly"}, + //default scope is "ScopeFullControl" + }, + ) + + // to pass specific client, for instance no-auth client + ctx := context.Background() + client, _ := storage.NewClient(ctx, option.WithoutAuthentication()) + fs = fs.WithClient(client) + } + +# Authentication + +Authentication, by default, occurs automatically when Client() is called. It looks for credentials in the following places, +preferring the first location found: + + 1. A JSON file whose path is specified by the GOOGLE_APPLICATION_CREDENTIALS environment variable + 2. A JSON file in a location known to the gcloud command-line tool. + On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. + On other systems, $HOME/.config/gcloud/application_default_credentials.json. + 3. 
On Google App Engine it uses the appengine.AccessToken function. + 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches credentials from the metadata server. + +See https://cloud.google.com/docs/authentication/production for more auth info + +# See Also + +See: https://github.com/googleapis/google-cloud-go/tree/master/storage +*/ +package gs diff --git a/vendor/github.com/c2fo/vfs/v6/backend/gs/file.go b/vendor/github.com/c2fo/vfs/v6/backend/gs/file.go new file mode 100644 index 000000000..87fd7ee86 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/gs/file.go @@ -0,0 +1,757 @@ +package gs + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "path" + "time" + + "cloud.google.com/go/storage" + "google.golang.org/api/iterator" + + "github.com/c2fo/vfs/v6" + "github.com/c2fo/vfs/v6/backend" + "github.com/c2fo/vfs/v6/options" + "github.com/c2fo/vfs/v6/options/delete" + "github.com/c2fo/vfs/v6/utils" +) + +const ( + doesNotExistError = "storage: object doesn't exist" +) + +// File implements vfs.File interface for GS fs. +type File struct { + fileSystem *FileSystem + bucket string + key string + + // seek-related fields + cursorPos int64 + seekCalled bool + + // read-related fields + reader io.ReadCloser + readCalled bool + + // write-related fields + tempFileWriter *os.File + gcsWriter io.WriteCloser + cancelFunc context.CancelFunc + writeCalled bool +} + +// Close commits any writes, either from the GCS writer stream or from a tempfile (in the case where Seek or Read are +// called after Write). It then cleans up any open resources and resets the file's state. +func (f *File) Close() error { + defer func() { + // reset state + f.reader = nil + f.cancelFunc = nil + f.gcsWriter = nil + f.cursorPos = 0 + f.seekCalled = false + f.readCalled = false + f.writeCalled = false + }() + + // cleanup reader (unless reader is also the writer tempfile) + if f.reader != nil && !f.writeCalled { + // close reader + if err := f.reader.Close(); err != nil { + return utils.WrapCloseError(err) + } + } + + // finalize writer + if f.gcsWriter != nil { + // close gcsWriter + if err := f.gcsWriter.Close(); err != nil { + return utils.WrapCloseError(err) + } + } else if f.tempFileWriter != nil { // gcsWriter is nil but tempFileWriter is not nil (seek after write, write after seek) + // write tempFileWriter to gcs + if err := f.tempToGCS(); err != nil { + return utils.WrapCloseError(err) + } + } + + // cleanup tempFileWriter + if f.tempFileWriter != nil { + if err := f.cleanupTempFile(); err != nil { + return utils.WrapCloseError(err) + } + } + + // close reader + if f.reader != nil && !f.writeCalled { + err := f.reader.Close() + if err != nil { + return utils.WrapCloseError(err) + } + } + + return nil +} + +func (f *File) tempToGCS() error { + + handle, err := f.getObjectHandle() + if err != nil { + return err + } + + w := handle.NewWriter(f.fileSystem.ctx) + defer func() { _ = w.Close() }() + + _, err = f.tempFileWriter.Seek(0, io.SeekStart) + if err != nil { + return err + } + + buffer := make([]byte, utils.TouchCopyMinBufferSize) + if _, err := io.CopyBuffer(w, f.tempFileWriter, buffer); err != nil { + return err + } + + return nil +} + +func (f *File) cleanupTempFile() error { + if f.tempFileWriter != nil { + err := f.tempFileWriter.Close() + if err != nil { + return err + } + + err = os.Remove(f.tempFileWriter.Name()) + if err != nil { + return err + } + + f.tempFileWriter = nil + } + + return nil +} + +// Read implements the standard for io.Reader. 
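+// Reads start at the file's current cursor position, and io.EOF is returned
+// unwrapped so callers such as io.Copy can detect end-of-file normally. Sketch
+// (editor's example):
+//
+//	buf := make([]byte, 1024)
+//	n, err := f.Read(buf)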
+func (f *File) Read(p []byte) (n int, err error) {
+	// check/initialize for reader
+	r, err := f.getReader()
+	if err != nil {
+		return 0, utils.WrapReadError(err)
+	}
+
+	read, err := r.Read(p)
+	if err != nil {
+		// if we got io.EOF, we'll return the read and the EOF error
+		// because io.Copy looks for EOF to determine if it's done
+		// and doesn't support error wrapping
+		if errors.Is(err, io.EOF) {
+			return read, io.EOF
+		}
+		return read, utils.WrapReadError(err)
+	}
+
+	f.cursorPos += int64(read)
+	f.readCalled = true
+
+	return read, nil
+}
+
+func (f *File) getReader() (io.ReadCloser, error) {
+	if f.reader == nil {
+		if f.writeCalled && f.tempFileWriter != nil {
+			// we've edited or truncated the file, so we need to read from the temp file which should already be at the
+			// current cursor position
+			f.reader = f.tempFileWriter
+		} else {
+			// get object handle
+			h, err := f.getObjectHandle()
+			if err != nil {
+				return nil, err
+			}
+
+			// get range reader (from current cursor position to end of file)
+			reader, err := h.NewRangeReader(f.fileSystem.ctx, f.cursorPos, -1)
+			if err != nil {
+				return nil, err
+			}
+
+			// Set the reader to the body of the object
+			f.reader = reader
+		}
+	}
+	return f.reader, nil
+}
+
+// Seek implements the standard for io.Seeker.
+func (f *File) Seek(offset int64, whence int) (int64, error) {
+	// get length of file
+	var length uint64
+	if f.writeCalled {
+		// if write has been called, then the length is the cursorPos
+		length = uint64(f.cursorPos)
+	} else {
+		var err error
+		length, err = f.Size()
+		if err != nil {
+			return 0, utils.WrapSeekError(err)
+		}
+	}
+
+	// invalidate reader (if any)
+	if f.reader != nil {
+		err := f.reader.Close()
+		if err != nil {
+			return 0, utils.WrapSeekError(err)
+		}
+
+		f.reader = nil
+	}
+
+	// invalidate gcsWriter
+	if f.gcsWriter != nil {
+		// cancel gcsWriter
+		f.cancelFunc()
+		f.cancelFunc = nil
+
+		f.gcsWriter = nil
+	}
+
+	// update seek position for tempFileWriter writer (if any)
+	if f.tempFileWriter != nil {
+		// seek tempFileWriter
+		_, err := f.tempFileWriter.Seek(offset, whence)
+		if err != nil {
+			return 0, utils.WrapSeekError(err)
+		}
+	}
+
+	// update cursorPos
+	pos, err := utils.SeekTo(int64(length), f.cursorPos, offset, whence)
+	if err != nil {
+		return 0, utils.WrapSeekError(err)
+	}
+	f.cursorPos = pos
+
+	f.seekCalled = true
+	return f.cursorPos, nil
+}
+
+// Write implements the standard for io.Writer. Note that writes are not committed to GCS until Close() is called.
+func (f *File) Write(data []byte) (int, error) {
+	// Here, we initialize both a tempFileWriter and a gcsWriter if they haven't been initialized yet.
+	// Then we write to both the local tempFileWriter and the gcsWriter stream. We do this on the unlikely chance
+	// that the file being written to is later Seek()'d to or Read() from before Close() is called.
+	// That would necessarily mean that the cursor for any later writes would change. Since we can't alter the current
+	// GCS stream, we cancel it and would need to write to the tempFileWriter only. Any later Close() would then write
+	// the tempFileWriter to GCS.
+	// This is a rare case, but is meant to emulate the behavior of a standard POSIX file system.
+	// We might consider placing each write in a goroutine with a WaitGroup if this becomes a performance issue.
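+	//
+	// In short (editor's summary of the above): the happy path streams bytes to GCS
+	// through gcsWriter while mirroring them to tempFileWriter; if Seek or Read
+	// intervenes, gcsWriter is canceled and Close() uploads the tempfile instead.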
+ + // check/initialize for writer + err := f.initWriters() + if err != nil { + return 0, utils.WrapWriteError(err) + } + + // write to tempfile + written, err := f.tempFileWriter.Write(data) + if err != nil { + return 0, utils.WrapWriteError(err) + } + + // write to gcs + if f.gcsWriter != nil { + // write to gcs + gcsWritten, err := f.gcsWriter.Write(data) + if err != nil { + return 0, utils.WrapWriteError(err) + } + + // ensure both writes are the same + if written != gcsWritten { + return 0, utils.WrapWriteError(errors.New("writers wrote different amounts of data")) + } + } + + // update cursorPos + f.cursorPos += int64(written) + f.writeCalled = true + + return written, nil +} + +func (f *File) initWriters() error { + if f.tempFileWriter == nil { + // Create temp file + tmpFile, err := os.CreateTemp("", fmt.Sprintf("vfs_gcs_%s.%d", f.Name(), time.Now().UnixNano())) + if err != nil { + return err + } + f.tempFileWriter = tmpFile + if f.cursorPos != 0 { + // if file exists(because cursor position is non-zero), we need to copy the existing gcsWriter file to temp + err := f.copyToLocalTempReader(tmpFile) + if err != nil { + return err + } + + // seek to cursorPos + if _, err := f.tempFileWriter.Seek(f.cursorPos, 0); err != nil { + return err + } + } + } + + // if we haven't seeked yet, we need to get the gcsWriter + if f.gcsWriter == nil { + if !f.seekCalled && !f.readCalled { + // setup cancelable context + ctx, cancel := context.WithCancel(f.fileSystem.ctx) + f.cancelFunc = cancel + + // get object handle + handle, err := f.getObjectHandle() + if err != nil { + return err + } + + // get gcsWriter + w := handle.NewWriter(ctx) + if err != nil { + return err + } + + // set gcsWriter + f.gcsWriter = w + } + } + + return nil +} + +// String returns the file URI string. +func (f *File) String() string { + return f.URI() +} + +// Exists returns a boolean of whether or not the object exists in GCS. +func (f *File) Exists() (bool, error) { + _, err := f.getObjectAttrs() + if err != nil { + if err.Error() == doesNotExistError { + return false, nil + } + return false, err + } + return true, nil +} + +// Location returns a Location instance for the file's current location. +func (f *File) Location() vfs.Location { + return vfs.Location(&Location{ + fileSystem: f.fileSystem, + prefix: utils.EnsureTrailingSlash(utils.EnsureLeadingSlash(path.Clean(path.Dir(f.key)))), + bucket: f.bucket, + }) +} + +// CopyToLocation creates a copy of *File, using the file's current name as the new file's +// name at the given location. If the given location is also GCS, the GCS API for copying +// files will be utilized, otherwise, standard io.Copy will be done to the new file. +func (f *File) CopyToLocation(location vfs.Location) (vfs.File, error) { + dest, err := location.NewFile(f.Name()) + if err != nil { + return nil, err + } + err = f.CopyToFile(dest) + if err != nil { + return nil, err + } + return dest, nil +} + +// CopyToFile puts the contents of File into the target vfs.File passed in. Uses the GCS CopierFrom +// method if the target file is also on GCS, otherwise uses io.CopyBuffer. +// This method should be called on a closed file or a file with 0 cursor position to avoid errors. 
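+// A sketch (editor's example; the target file is hypothetical):
+//
+//	dst, _ := targetLocation.NewFile("copy.txt")
+//	err := f.CopyToFile(dst) // server-side copy when both files are GCS with matching auth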
+func (f *File) CopyToFile(file vfs.File) (err error) { + // Close file (f) reader regardless of an error + defer func() { + // close writer + wErr := file.Close() + // close reader + rErr := f.Close() + // + if err == nil { + if wErr != nil { + err = wErr + } else if rErr != nil { + err = rErr + } + } + }() + + // validate seek is at 0,0 before doing copy + if err := backend.ValidateCopySeekPosition(f); err != nil { + return err + } + + // do native copy if same location/auth + if tf, ok := file.(*File); ok { + opts, ok := tf.Location().FileSystem().(*FileSystem).options.(Options) + if ok { + if f.isSameAuth(&opts) { + return f.copyWithinGCSToFile(tf) + } + } + } + + // Otherwise, use TouchCopyBuffered using io.CopyBuffer + fileBufferSize := 0 + + if opts, ok := f.Location().FileSystem().(*FileSystem).options.(Options); ok { + fileBufferSize = opts.FileBufferSize + } + + if err := utils.TouchCopyBuffered(file, f, fileBufferSize); err != nil { + return err + } + // Close target to flush and ensure that cursor isn't at the end of the file when the caller reopens for read + if cerr := file.Close(); cerr != nil { + return cerr + } + // Close file (f) reader + return err +} + +// MoveToLocation works by first calling File.CopyToLocation(vfs.Location) then, if that +// succeeds, it deletes the original file, returning the new file. If the copy process fails +// the error is returned, and the Delete isn't called. If the call to Delete fails, the error +// and the file generated by the copy are both returned. +func (f *File) MoveToLocation(location vfs.Location) (vfs.File, error) { + newFile, err := f.CopyToLocation(location) + if err != nil { + return nil, err + } + delErr := f.Delete() + return newFile, delErr +} + +// MoveToFile puts the contents of File into the target vfs.File passed in using File.CopyToFile. +// If the copy succeeds, the source file is deleted. Any errors from the copy or delete are +// returned. +func (f *File) MoveToFile(file vfs.File) error { + if err := f.CopyToFile(file); err != nil { + return err + } + + return f.Delete() +} + +// Delete clears any local temp file, or write buffer from read/writes to the file, then makes +// a DeleteObject call to GCS for the file. If DeleteAllVersions option is provided, +// DeleteObject call is made to GCS for each version of the file. Returns any error returned by the API. +func (f *File) Delete(opts ...options.DeleteOption) error { + if err := f.Close(); err != nil { + return err + } + + var deleteAllVersions bool + for _, o := range opts { + switch o.(type) { + case delete.DeleteAllVersions: + deleteAllVersions = true + default: + } + } + + handle, err := f.getObjectHandle() + if err != nil { + return err + } + err = handle.Delete(f.fileSystem.ctx) + if err != nil { + return err + } + + if deleteAllVersions { + handles, err := f.getObjectGenerationHandles() + if err != nil { + return err + } + for _, handle := range handles { + err := handle.Delete(f.fileSystem.ctx) + if err != nil { + return err + } + } + } + return nil +} + +// Touch creates a zero-length file on the vfs.File if no File exists. Update File's last modified timestamp. +// Returns error if unable to touch File. 
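+// Note (editor's summary of the code below): on buckets with versioning enabled the
+// timestamp is refreshed by moving the file in place; otherwise a metadata update
+// round-trip is used.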
+func (f *File) Touch() error { + + // check if file exists + exists, err := f.Exists() + if err != nil { + return err + } + + // if file doesn't already exist, create it + if !exists { + return f.createEmptyFile() + } + + // already exists so update it so Last-Modified is updated + + // With versioning enabled, updates to custom meta won't update Updated date: + // from https://godoc.org/cloud.google.com/go/storage#ObjectAttrs: + // // Updated is the creation or modification time of the object. + // // For buckets with versioning enabled, changing an object's + // // metadata does not change this property. This field is read-only. + // Updated time.Time + + enabled, err := f.isBucketVersioningEnabled() + if err != nil { + return err + } + + if enabled { + return utils.UpdateLastModifiedByMoving(f) + } + + return f.updateLastModifiedByAttrUpdate() +} + +func (f *File) updateLastModifiedByAttrUpdate() error { + + // save original metadata (in case it was set already) + objAttrs, err := f.getObjectAttrs() + if err != nil { + return err + } + oldMetaData := objAttrs.Metadata + + // setup dummy metadata tag and update object with it + var updateAttrs storage.ObjectAttrsToUpdate + updateAttrs.Metadata = map[string]string{"updateMe": "true"} + + obj, err := f.getObjectHandle() + if err != nil { + return err + } + + cctx, cancel := context.WithCancel(f.fileSystem.ctx) + defer cancel() + + _, err = obj.Update(cctx, updateAttrs) + if err != nil { + return err + } + + // now switch metadata back to original values + updateAttrs.Metadata = oldMetaData + _, err = obj.Update(cctx, updateAttrs) + if err != nil { + return err + } + + return nil +} + +func (f *File) isBucketVersioningEnabled() (bool, error) { + client, err := f.fileSystem.Client() + if err != nil { + return false, err + } + cctx, cancel := context.WithCancel(f.fileSystem.ctx) + defer cancel() + attrs, err := client.Bucket(f.bucket).Attrs(cctx) + if err != nil { + return false, err + } + return attrs.VersioningEnabled, nil +} + +func (f *File) createEmptyFile() error { + + handle, err := f.getObjectHandle() + if err != nil { + return err + } + + // write zero length file. + ctx, cancel := context.WithCancel(f.fileSystem.ctx) + defer cancel() + + w := handle.NewWriter(ctx) + defer func() { _ = w.Close() }() + if _, err := w.Write(make([]byte, 0)); err != nil { + return err + } + + // return early + return nil +} + +func (f *File) isSameAuth(opts *Options) bool { + // If options are nil on both sides, assume Google's default context is used in both cases. + if opts == nil && f.fileSystem.options == nil { + return true + } + + if opts == nil || f.fileSystem.options == nil { + return false + } + + fOptions := f.fileSystem.options.(Options) + + if opts.CredentialFile != "" && opts.CredentialFile == fOptions.CredentialFile { + return true + } + + if opts.APIKey != "" && opts.APIKey == fOptions.APIKey { + return true + } + + return false +} + +// LastModified returns the 'Updated' property from the GCS attributes. +func (f *File) LastModified() (*time.Time, error) { + attr, err := f.getObjectAttrs() + if err != nil { + return nil, err + } + return &attr.Updated, nil +} + +// Size returns the 'Size' property from the GCS attributes. +func (f *File) Size() (uint64, error) { + attr, err := f.getObjectAttrs() + if err != nil { + return 0, err + } + return uint64(attr.Size), nil +} + +// Path returns full path with leading slash of the GCS file key. +func (f *File) Path() string { + return f.key +} + +// Name returns the file name. 
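+// For a key of "/some/path/file.txt" (hypothetical), Name returns "file.txt".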
+func (f *File) Name() string { + return path.Base(f.key) +} + +// URI returns a full GCS URI string of the file. +func (f *File) URI() string { + return utils.GetFileURI(vfs.File(f)) +} + +func (f *File) copyToLocalTempReader(tmpFile *os.File) error { + + handle, err := f.getObjectHandle() + if err != nil { + return err + } + + outputReader, err := handle.NewReader(f.fileSystem.ctx) + if err != nil { + return err + } + + buffer := make([]byte, utils.TouchCopyMinBufferSize) + if _, err := io.CopyBuffer(tmpFile, outputReader, buffer); err != nil { + return err + } + + if err := outputReader.Close(); err != nil { + if cerr := tmpFile.Close(); cerr != nil { + return cerr + } + return err + } + + // Return cursor to the beginning of the new temp file + if _, err := tmpFile.Seek(0, 0); err != nil { + return err + } + + return nil +} + +// getObjectHandle returns cached Object struct for file +func (f *File) getObjectHandle() (ObjectHandleCopier, error) { + client, err := f.fileSystem.Client() + if err != nil { + return nil, err + } + + handler := client.Bucket(f.bucket).Object(utils.RemoveLeadingSlash(f.key)) + return &RetryObjectHandler{Retry: f.fileSystem.Retry(), handler: handler}, nil +} + +// getObjectGenerationHandles returns Object generation structs for file +func (f *File) getObjectGenerationHandles() ([]*storage.ObjectHandle, error) { + client, err := f.fileSystem.Client() + var handles []*storage.ObjectHandle + if err != nil { + return nil, err + } + it := client.Bucket(f.bucket). + Objects(f.fileSystem.ctx, &storage.Query{Versions: true, Prefix: utils.RemoveLeadingSlash(f.key)}) + + for { + attrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, err + } + handle := client.Bucket(attrs.Bucket).Object(attrs.Name).Generation(attrs.Generation) + handles = append(handles, handle) + } + return handles, err +} + +// getObjectAttrs returns the file's attributes +func (f *File) getObjectAttrs() (*storage.ObjectAttrs, error) { + handle, err := f.getObjectHandle() + if err != nil { + return nil, err + } + return handle.Attrs(f.fileSystem.ctx) +} + +func (f *File) copyWithinGCSToFile(targetFile *File) error { + tHandle, err := targetFile.getObjectHandle() + if err != nil { + return err + } + fHandle, err := f.getObjectHandle() + if err != nil { + return err + } + // Copy content and modify metadata. + copier := tHandle.WrappedCopierFrom(fHandle.ObjectHandle()) + attrs, gerr := f.getObjectAttrs() + if gerr != nil { + return gerr + } + copier.ContentType(attrs.ContentType) + + // Just copy content. + _, cerr := copier.Run(f.fileSystem.ctx) + return cerr +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/gs/fileSystem.go b/vendor/github.com/c2fo/vfs/v6/backend/gs/fileSystem.go new file mode 100644 index 000000000..35b780513 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/gs/fileSystem.go @@ -0,0 +1,145 @@ +package gs + +import ( + "errors" + "path" + + "cloud.google.com/go/storage" + "golang.org/x/net/context" + "google.golang.org/api/option" + + "github.com/c2fo/vfs/v6" + "github.com/c2fo/vfs/v6/backend" + "github.com/c2fo/vfs/v6/utils" +) + +// Scheme defines the file system type. +const Scheme = "gs" +const name = "Google Cloud Storage" + +// FileSystem implements vfs.FileSystem for the GCS file system. +type FileSystem struct { + client *storage.Client + ctx context.Context + options vfs.Options + clientCreator ClientCreator +} + +// Retry will return a retrier provided via options, or a no-op if none is provided. 
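+// A sketch of supplying a custom retryer (editor's example):
+//
+//	fs := NewFileSystem().WithOptions(Options{
+//		Retry: func(wrapped func() error) error {
+//			// naive single retry on failure
+//			if err := wrapped(); err != nil {
+//				return wrapped()
+//			}
+//			return nil
+//		},
+//	})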
+func (fs *FileSystem) Retry() vfs.Retry {
+	if options, _ := fs.options.(Options); options.Retry != nil {
+		return options.Retry
+	}
+	return vfs.DefaultRetryer()
+}
+
+// NewFile function returns the gcs implementation of vfs.File.
+func (fs *FileSystem) NewFile(volume, name string) (vfs.File, error) {
+	if fs == nil {
+		return nil, errors.New("non-nil gs.FileSystem pointer is required")
+	}
+	if volume == "" || name == "" {
+		return nil, errors.New("non-empty strings for Bucket and Key are required")
+	}
+	if err := utils.ValidateAbsoluteFilePath(name); err != nil {
+		return nil, err
+	}
+	return &File{
+		fileSystem: fs,
+		bucket:     volume,
+		key:        path.Clean(name),
+	}, nil
+}
+
+// NewLocation function returns the GCS implementation of vfs.Location.
+func (fs *FileSystem) NewLocation(volume, name string) (loc vfs.Location, err error) {
+	if fs == nil {
+		return nil, errors.New("non-nil gs.FileSystem pointer is required")
+	}
+	if volume == "" || name == "" {
+		return nil, errors.New("non-empty strings for bucket and key are required")
+	}
+	if err := utils.ValidateAbsoluteLocationPath(name); err != nil {
+		return nil, err
+	}
+	loc = &Location{
+		fileSystem: fs,
+		bucket:     volume,
+		prefix:     utils.EnsureTrailingSlash(path.Clean(name)),
+	}
+	return
+}
+
+// Name returns "Google Cloud Storage"
+func (fs *FileSystem) Name() string {
+	return name
+}
+
+// Scheme returns "gs" as the initial part of a file URI ie: gs://
+func (fs *FileSystem) Scheme() string {
+	return Scheme
+}
+
+// Client returns the underlying google storage client, creating it, if necessary.
+// See Overview for authentication resolution.
+func (fs *FileSystem) Client() (*storage.Client, error) {
+	if fs.client == nil {
+		gsClientOpts := parseClientOptions(fs.options)
+		client, err := fs.clientCreator.NewClient(fs.ctx, gsClientOpts...)
+		if err != nil {
+			return nil, err
+		}
+		fs.client = client
+	}
+	return fs.client, nil
+}
+
+// WithOptions sets options for client and returns the file system (chainable)
+func (fs *FileSystem) WithOptions(opts vfs.Options) *FileSystem {
+	fs.options = opts
+	// we set client to nil to ensure that a new client is created using the new options when Client() is called
+	fs.client = nil
+	return fs
+}
+
+// WithContext passes in user context and returns the file system (chainable)
+func (fs *FileSystem) WithContext(ctx context.Context) *FileSystem {
+	fs.ctx = ctx
+	// we set client to nil to ensure that a new client is created using the new context when Client() is called
+	fs.client = nil
+	return fs
+}
+
+// WithClient passes in a google storage client and returns the file system (chainable)
+func (fs *FileSystem) WithClient(client *storage.Client) *FileSystem {
+	fs.client = client
+	return fs
+}
+
+// ClientCreator defines an interface for creating a new Google Cloud Storage client.
+type ClientCreator interface {
+	NewClient(ctx context.Context, opts ...option.ClientOption) (*storage.Client, error)
+}
+
+// defaultClientCreator is the default implementation of ClientCreator.
+type defaultClientCreator struct{}
+
+// NewClient is a function that creates a new Google Cloud Storage client.
+func (d *defaultClientCreator) NewClient(ctx context.Context, opts ...option.ClientOption) (*storage.Client, error) {
+	return storage.NewClient(ctx, opts...)
+}
+
+// NewFileSystem is an initializer for the FileSystem struct. It takes no arguments and returns a
+// FileSystem backed by a background context and the default client creator.
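+// Sketch (editor's example; the credential path is hypothetical):
+//
+//	fs := NewFileSystem().
+//		WithContext(ctx).
+//		WithOptions(Options{CredentialFile: "/path/to/creds.json"})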
+func NewFileSystem() *FileSystem { + fs := &FileSystem{ + ctx: context.Background(), + clientCreator: &defaultClientCreator{}, + } + + return fs +} + +func init() { + // registers a default Filesystem + backend.Register(Scheme, NewFileSystem()) +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/gs/location.go b/vendor/github.com/c2fo/vfs/v6/backend/gs/location.go new file mode 100644 index 000000000..f4ad7d306 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/gs/location.go @@ -0,0 +1,217 @@ +package gs + +import ( + "errors" + "path" + "regexp" + "strings" + + "cloud.google.com/go/storage" + "google.golang.org/api/iterator" + + "github.com/c2fo/vfs/v6" + "github.com/c2fo/vfs/v6/options" + "github.com/c2fo/vfs/v6/utils" +) + +// Location implements vfs.Location for gs fs. +type Location struct { + fileSystem *FileSystem + prefix string + bucket string + bucketHandle BucketHandleWrapper +} + +// String returns the full URI of the location. +func (l *Location) String() string { + return l.URI() +} + +// List returns a list of file name strings for the current location. +func (l *Location) List() ([]string, error) { + return l.ListByPrefix("") +} + +// ListByPrefix returns a slice of file base names and any error, if any +// List functions return only file basenames +func (l *Location) ListByPrefix(filenamePrefix string) ([]string, error) { + prefix := utils.RemoveLeadingSlash(path.Join(l.prefix, filenamePrefix)) + // add trailing slash to location prefix when file query prefix is empty: + // NewLocation("/some/path/").ListByPrefix("") + // OR when it ended with a slash (for directory level searches): + // NewLocation("/some/path/").ListByPrefix("dir1/dir2/") + // obviously we don't want to add a trailing slash if we're looking for a file prefix: + // NewLocation("/some/path/").ListByPrefix("dir1/MyFilePrefix") + if filenamePrefix == "" || filenamePrefix[len(filenamePrefix)-1:] == "/" { + prefix = utils.EnsureTrailingSlash(prefix) + } + // remove location prefix altogether if this is the root + if prefix == "/" { + prefix = "" + } + d := path.Dir(prefix) + q := &storage.Query{ + Delimiter: "/", + Prefix: prefix, + Versions: false, + } + + handle, err := l.getBucketHandle() + if err != nil { + return nil, err + } + var fileNames []string + + it := handle.WrappedObjects(l.fileSystem.ctx, q) + for { + objAttrs, err := it.Next() + if err != nil { + if err == iterator.Done { + break + } + return nil, err + } + // only include objects, not "directories" + if objAttrs.Prefix == "" && objAttrs.Name != d && !strings.HasSuffix(objAttrs.Name, "/") { + fn := strings.TrimPrefix(objAttrs.Name, utils.EnsureTrailingSlash(d)) + fileNames = append(fileNames, fn) + } + } + + return fileNames, nil +} + +// ListByRegex returns a list of file names at the location which match the provided regular expression. +func (l *Location) ListByRegex(regex *regexp.Regexp) ([]string, error) { + keys, err := l.List() + if err != nil { + return []string{}, err + } + + var filteredKeys []string + for _, key := range keys { + if regex.MatchString(key) { + filteredKeys = append(filteredKeys, key) + } + } + return filteredKeys, nil +} + +// Volume returns the GCS bucket name. +func (l *Location) Volume() string { + return l.bucket +} + +// Path returns the path of the file at the current location, starting with a leading '/' +func (l *Location) Path() string { + return utils.EnsureLeadingSlash(utils.EnsureTrailingSlash(l.prefix)) +} + +// Exists returns whether the location exists or not. 
In the case of an error, false is returned. +func (l *Location) Exists() (bool, error) { + _, err := l.getBucketAttrs() + if err != nil { + if err == storage.ErrBucketNotExist { + return false, nil + } + return false, err + } + return true, nil +} + +// NewLocation creates a new location instance relative to the current location's path. +func (l *Location) NewLocation(relativePath string) (vfs.Location, error) { + if l == nil { + return nil, errors.New("non-nil gs.Location pointer is required") + } + + // make a copy of the original location first, then ChangeDir, leaving the original location as-is + newLocation := &Location{} + *newLocation = *l + err := newLocation.ChangeDir(relativePath) + if err != nil { + return nil, err + } + return newLocation, nil +} + +// ChangeDir changes the current location's path to the new, relative path. +func (l *Location) ChangeDir(relativePath string) error { + if l == nil { + return errors.New("non-nil gs.Location pointer is required") + } + if relativePath == "" { + return errors.New("non-empty string relativePath is required") + } + err := utils.ValidateRelativeLocationPath(relativePath) + if err != nil { + return err + } + l.prefix = utils.EnsureTrailingSlash(utils.EnsureLeadingSlash(path.Join(l.prefix, relativePath))) + return nil +} + +// FileSystem returns the GCS file system instance. +func (l *Location) FileSystem() vfs.FileSystem { + return l.fileSystem +} + +// NewFile returns a new file instance at the given path, relative to the current location. +func (l *Location) NewFile(filePath string) (vfs.File, error) { + if l == nil { + return nil, errors.New("non-nil gs.Location pointer is required") + } + if filePath == "" { + return nil, errors.New("non-empty string filePath is required") + } + err := utils.ValidateRelativeFilePath(filePath) + if err != nil { + return nil, err + } + newFile := &File{ + fileSystem: l.fileSystem, + bucket: l.bucket, + key: utils.EnsureLeadingSlash(path.Join(l.prefix, filePath)), + } + return newFile, nil +} + +// DeleteFile deletes the file at the given path, relative to the current location. +func (l *Location) DeleteFile(fileName string, opts ...options.DeleteOption) error { + file, err := l.NewFile(fileName) + if err != nil { + return err + } + + return file.Delete(opts...) +} + +// URI returns a URI string for the GCS location. 
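+// For a location with bucket "my-bucket" and prefix "/some/path/" (hypothetical),
+// the result is along the lines of "gs://my-bucket/some/path/".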
+func (l *Location) URI() string { + return utils.GetLocationURI(l) +} + +// getBucketHandle returns cached Bucket struct for file +func (l *Location) getBucketHandle() (BucketHandleWrapper, error) { + if l.bucketHandle != nil { + return l.bucketHandle, nil + } + + client, err := l.fileSystem.Client() + if err != nil { + return nil, err + } + handler := &RetryBucketHandler{Retry: l.fileSystem.Retry(), handler: client.Bucket(l.bucket)} + l.bucketHandle = handler + return l.bucketHandle, nil +} + +// getObjectAttrs returns the file's attributes +func (l *Location) getBucketAttrs() (*storage.BucketAttrs, error) { + handle, err := l.getBucketHandle() + if err != nil { + return nil, err + } + + return handle.Attrs(l.fileSystem.ctx) +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/gs/objectHandleWrapper.go b/vendor/github.com/c2fo/vfs/v6/backend/gs/objectHandleWrapper.go new file mode 100644 index 000000000..4dcccc3e5 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/gs/objectHandleWrapper.go @@ -0,0 +1,161 @@ +package gs + +import ( + "context" + "errors" + + "cloud.google.com/go/storage" + "google.golang.org/api/iterator" + + "github.com/c2fo/vfs/v6" +) + +// ObjectHandleWrapper is an interface which contains a subset of the functions provided +// by storage.ObjectHandler. Any function normally called directly by storage.ObjectHandler +// should be added to this interface to allow for proper retry wrapping of the functions +// which call the GCS API. +type ObjectHandleWrapper interface { + NewWriter(ctx context.Context) *storage.Writer + NewReader(ctx context.Context) (*storage.Reader, error) + NewRangeReader(ctx context.Context, offset, length int64) (*storage.Reader, error) + Attrs(ctx context.Context) (*storage.ObjectAttrs, error) + Delete(ctx context.Context) error + Update(ctx context.Context, attrs storage.ObjectAttrsToUpdate) (*storage.ObjectAttrs, error) +} + +// ObjectHandleCopier is a unique, wrapped type which should mimic the behavior of ObjectHandler, but with +// modified return types. Each function that returns a sub type that also should be wrapped should be added +// to this interface with the 'Wrapped' prefix. +type ObjectHandleCopier interface { + ObjectHandleWrapper + WrappedCopierFrom(src *storage.ObjectHandle) CopierWrapper + ObjectHandle() *storage.ObjectHandle +} + +// CopierWrapper is an interface which contains a subset of the functions provided by storage.Copier. +type CopierWrapper interface { + Run(ctx context.Context) (*storage.ObjectAttrs, error) + ContentType(string) +} + +// RetryObjectHandler implements the ObjectHandleCopier interface (which also is composed with ObjectHandleWrapper) +type RetryObjectHandler struct { + Retry vfs.Retry + handler *storage.ObjectHandle +} + +// NewWriter returns a storage Writer that writes to the GCS object +// associated with this ObjectHandle, wrapped in a retry. +func (r *RetryObjectHandler) NewWriter(ctx context.Context) *storage.Writer { + return r.handler.NewWriter(ctx) +} + +// NewReader creates a new Reader to read the contents of the object, wrapped in a retry. +// ErrObjectNotExist will be returned if the object is not found. 
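+// (Editor's note: the retry-wrapped methods below share one shape: capture the
+// result in a closure, hand the closure to the vfs.Retry func, and return the
+// captured result once the retryer succeeds.)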
+func (r *RetryObjectHandler) NewReader(ctx context.Context) (*storage.Reader, error) { + var reader *storage.Reader + if err := r.Retry(func() error { + var retryErr error + reader, retryErr = r.handler.NewReader(ctx) + if retryErr != nil { + return retryErr + } + return nil + }); err != nil { + return nil, err + } + return reader, nil +} + +// NewRangeReader creates a new Reader to read the contents of the object starting at an offset, wrapped in a retry. +// ErrObjectNotExist will be returned if the object is not found. +func (r *RetryObjectHandler) NewRangeReader(ctx context.Context, offset, length int64) (*storage.Reader, error) { + var reader *storage.Reader + if err := r.Retry(func() error { + var retryErr error + reader, retryErr = r.handler.NewRangeReader(ctx, offset, length) + if retryErr != nil { + return retryErr + } + return nil + }); err != nil { + return nil, err + } + return reader, nil +} + +// Attrs represents the metadata for a Google Cloud Storage (GCS) object, wrapped in a retry. +func (r *RetryObjectHandler) Attrs(ctx context.Context) (*storage.ObjectAttrs, error) { + return objectAttributeRetry(r.Retry, func() (*storage.ObjectAttrs, error) { + return r.handler.Attrs(ctx) + }) +} + +// Update will update the metadata for a Google Cloud Storage (GCS) object, wrapped in a retry. +// Here we can re-use the objectAttributeRetry() func because it has the same signature. +func (r *RetryObjectHandler) Update(ctx context.Context, attrs storage.ObjectAttrsToUpdate) (*storage.ObjectAttrs, error) { + return objectAttributeRetry(r.Retry, func() (*storage.ObjectAttrs, error) { + return r.handler.Update(ctx, attrs) + }) +} + +// Delete deletes the single specified object, wrapped in a retry. +func (r *RetryObjectHandler) Delete(ctx context.Context) error { + if err := r.Retry(func() error { + if retryErr := r.handler.Delete(ctx); retryErr != nil { + return retryErr + } + return nil + }); err != nil { + return err + } + return nil +} + +// WrappedCopierFrom creates a Copier that can copy src to dst, wrapped in a retry. +// You can immediately call Run on the returned Copier, or +// you can configure it first. +func (r *RetryObjectHandler) WrappedCopierFrom(src *storage.ObjectHandle) CopierWrapper { + return &Copier{copier: r.handler.CopierFrom(src), Retry: r.Retry} +} + +// ObjectHandle returns the underlying GCS object handle. +func (r *RetryObjectHandler) ObjectHandle() *storage.ObjectHandle { + return r.handler +} + +// Copier implements the CopierWrapper interface. +type Copier struct { + copier *storage.Copier + Retry vfs.Retry +} + +// ContentType is the MIME type of the object's content. 
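+// Setting it before Run preserves the source object's content type during a copy,
+// as copyWithinGCSToFile does in file.go.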
+func (c *Copier) ContentType(val string) {
+	c.copier.ContentType = val
+}
+
+// Run performs the copy, wrapped in a retry
+func (c *Copier) Run(ctx context.Context) (*storage.ObjectAttrs, error) {
+	return objectAttributeRetry(c.Retry, func() (*storage.ObjectAttrs, error) {
+		return c.copier.Run(ctx)
+	})
+}
+
+func objectAttributeRetry(retry vfs.Retry, attrFunc func() (*storage.ObjectAttrs, error)) (*storage.ObjectAttrs, error) {
+	var attrs *storage.ObjectAttrs
+	attrs, err := attrFunc()
+	if err != nil && !errors.Is(err, iterator.Done) {
+		if err := retry(func() error {
+			var retryErr error
+			attrs, retryErr = attrFunc()
+			if retryErr != nil {
+				return retryErr
+			}
+			return nil
+		}); err != nil {
+			return nil, err
+		}
+	}
+	return attrs, err
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/gs/options.go b/vendor/github.com/c2fo/vfs/v6/backend/gs/options.go
new file mode 100644
index 000000000..bba02f5a1
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/gs/options.go
@@ -0,0 +1,36 @@
+package gs
+
+import (
+	"google.golang.org/api/option"
+
+	"github.com/c2fo/vfs/v6"
+)
+
+// Options holds Google Cloud Storage-specific options. Currently only client options are used.
+type Options struct {
+	APIKey         string `json:"apiKey,omitempty"`
+	CredentialFile string `json:"credentialFilePath,omitempty"`
+	Endpoint       string `json:"endpoint,omitempty"`
+	Scopes         []string `json:"WithoutAuthentication,omitempty"`
+	Retry          vfs.Retry
+	FileBufferSize int // Buffer Size In Bytes Used with utils.TouchCopyBuffered
+}
+
+func parseClientOptions(opts vfs.Options) []option.ClientOption {
+	var googleClientOpts []option.ClientOption
+
+	// we only care about 'gs.Options' types, skip anything else
+	if opts, ok := opts.(Options); ok {
+		switch {
+		case opts.APIKey != "":
+			googleClientOpts = append(googleClientOpts, option.WithAPIKey(opts.APIKey))
+		case opts.CredentialFile != "":
+			googleClientOpts = append(googleClientOpts, option.WithCredentialsFile(opts.CredentialFile))
+		case opts.Endpoint != "":
+			googleClientOpts = append(googleClientOpts, option.WithEndpoint(opts.Endpoint))
+		case len(opts.Scopes) > 0:
+			googleClientOpts = append(googleClientOpts, option.WithScopes(opts.Scopes...))
+		}
+	}
+	return googleClientOpts
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/helpers.go b/vendor/github.com/c2fo/vfs/v6/backend/helpers.go
new file mode 100644
index 000000000..4229405e0
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/helpers.go
@@ -0,0 +1,23 @@
+package backend
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/c2fo/vfs/v6"
+)
+
+// ValidateCopySeekPosition ensures the current seek cursor is 0,0. This is useful to ensure it's safe to copy. A seek position
+// elsewhere will mean a partial copy.
+func ValidateCopySeekPosition(f vfs.File) error {
+	// validate seek is at 0,0 before doing copy
+	offset, err := f.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return fmt.Errorf("failed to determine current cursor offset: %w", err)
+	}
+	if offset != 0 {
+		return vfs.CopyToNotPossible
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/mem/doc.go b/vendor/github.com/c2fo/vfs/v6/backend/mem/doc.go
new file mode 100644
index 000000000..f07c6ace9
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/mem/doc.go
@@ -0,0 +1,23 @@
+/*
+Package mem built-in mem lib VFS implementation.
+Usage +Rely on github.com/c2fo/vfs/v6/backend + + import( + "github.com/c2fo/vfs/v6/backend" + "github.com/c2fo/vfs/v6/backend/mem" + ) + func UseFs() error { + fs := backend.Backend(mem.Scheme) + ... + } + +Or call directly: + + import _mem "github.com/c2fo/vfs/v6/backend/mem" + func DoSomething() { + fs := _mem.NewFileSystem() + ... + } +*/ +package mem diff --git a/vendor/github.com/c2fo/vfs/v6/backend/mem/file.go b/vendor/github.com/c2fo/vfs/v6/backend/mem/file.go new file mode 100644 index 000000000..bd6b9e874 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/mem/file.go @@ -0,0 +1,549 @@ +package mem + +import ( + "errors" + "io" + "io/fs" + "path" + "sync" + "time" + + "github.com/c2fo/vfs/v6" + "github.com/c2fo/vfs/v6/backend" + "github.com/c2fo/vfs/v6/options" + "github.com/c2fo/vfs/v6/utils" +) + +type mode int + +const ( + none mode = iota + truncate + edit +) + +// memFile represents the file's at-rest state on the file system. including its contents, location, and lastModified time +type memFile struct { + sync.Mutex + exists bool + contents []byte // the file's contents at rest + location *Location + lastModified time.Time + name string + filepath string +} + +// File implements vfs.File interface for the in-memory implementation of FileSystem. +// A file struct holds a pointer to a single memFile. Multiple threads will refer to the same +// memFile. Simultaneous reading is allowed, but writing and closing are protected by locks. +type File struct { + memFile *memFile + readWriteSeeker *ReadWriteSeeker + name string // the base name of the file + cursor int + writeMode mode + isOpen bool + readCalled bool + seekCalled bool +} + +func nilReference() error { + return errors.New("the target file passed in was nil") +} + +// Close imitates io.Closer by resetting the cursor and setting a boolean +func (f *File) Close() error { + if f.isOpen { + // update the contents of the memFile + f.memFile.Lock() + defer f.memFile.Unlock() + f.memFile.contents = f.readWriteSeeker.Bytes() + f.memFile.lastModified = time.Now() + f.memFile.exists = true + f.memFile.location.exists = true + + // update the fsMap + f.Location().FileSystem().(*FileSystem).mu.Lock() + defer f.Location().FileSystem().(*FileSystem).mu.Unlock() + mapRef := f.Location().FileSystem().(*FileSystem).fsMap + if _, ok := mapRef[f.Location().Volume()]; ok { + if _, ok := mapRef[f.Location().Volume()][f.Path()]; ok { + // memfile exists, so we update it + mapRef[f.Location().Volume()][f.Path()].i = f.memFile + } else { + // memfile does not exist, so we create it + mapRef[f.Location().Volume()][f.Path()] = &fsObject{true, f.memFile} + } + } else { + // volume does not exist, so we create it with the memfile + mapRef[f.Location().Volume()] = make(objMap) + mapRef[f.Location().Volume()][f.Path()] = &fsObject{true, f.memFile} + } + } + + f.seekCalled = false + f.readCalled = false + f.readWriteSeeker = nil + f.writeMode = none + f.isOpen = false + f.cursor = 0 + + return nil +} + +// Read implements the io.Reader interface. 
Returns the number of bytes read and an error, if any.
+func (f *File) Read(p []byte) (n int, err error) {
+    var existsOnFS bool
+    if existsOnFS, err = f.Exists(); !existsOnFS && !f.isOpen {
+        if err != nil {
+            return 0, utils.WrapReadError(err)
+        }
+        return 0, utils.WrapReadError(fs.ErrNotExist)
+    }
+
+    // in case the file contents have changed
+    if existsOnFS && f.writeMode == none {
+        // get the file's contents from fsMap
+        fsMap := f.Location().FileSystem().(*FileSystem).fsMap
+        if objMap, ok := fsMap[f.Location().Volume()]; ok {
+            if obj, ok := objMap[f.Path()]; ok {
+                if obj.isFile {
+                    // update the file's memFile
+                    f.memFile = obj.i.(*memFile)
+                }
+            }
+        }
+
+        // update the file's readWriteSeeker contents and set the cursor to the current position
+        f.readWriteSeeker = NewReadWriteSeekerWithData(f.memFile.contents)
+        _, err = f.readWriteSeeker.Seek(int64(f.cursor), 0)
+        if err != nil {
+            return 0, utils.WrapReadError(err)
+        }
+    }
+
+    // read file
+    read, err := f.readWriteSeeker.Read(p)
+    if err != nil {
+        // if we got io.EOF, we'll return the read and the EOF error
+        // because io.Copy looks for EOF to determine if it's done
+        // and doesn't support error wrapping
+        if errors.Is(err, io.EOF) {
+            return read, io.EOF
+        }
+        return read, utils.WrapReadError(err)
+    }
+
+    // update open file's state
+    f.readCalled = true
+    f.isOpen = true
+    f.cursor += read
+
+    return read, nil
+}
+
+// Seek implements the io.Seeker interface. Returns the new position of the cursor and an error, if any.
+func (f *File) Seek(offset int64, whence int) (int64, error) {
+    var existsOnFS bool
+    var err error
+    if existsOnFS, err = f.Exists(); !existsOnFS && !f.isOpen {
+        if err != nil {
+            return 0, utils.WrapSeekError(err)
+        }
+        return 0, utils.WrapSeekError(fs.ErrNotExist)
+    }
+
+    // in case the file contents have changed
+    if existsOnFS && f.writeMode == none {
+        // update the file's memFile
+        fsMap := f.Location().FileSystem().(*FileSystem).fsMap
+        if objMap, ok := fsMap[f.Location().Volume()]; ok {
+            if obj, ok := objMap[f.Path()]; ok {
+                if obj.isFile {
+                    // update the file's memFile
+                    f.memFile = obj.i.(*memFile)
+                }
+            }
+        }
+
+        // update the file's readWriteSeeker contents and set the cursor to the current position
+        f.readWriteSeeker = NewReadWriteSeekerWithData(f.memFile.contents)
+        _, err := f.readWriteSeeker.Seek(int64(f.cursor), 0)
+        if err != nil {
+            return 0, utils.WrapSeekError(err)
+        }
+    }
+
+    // seek file
+    pos, err := f.readWriteSeeker.Seek(offset, whence)
+    if err != nil {
+        return 0, utils.WrapSeekError(err)
+    }
+
+    // update open file's state
+    f.isOpen = true
+    f.seekCalled = true
+    f.cursor = int(pos)
+
+    return pos, nil
+}
+
+// Write implements the io.Writer interface. Returns the number of bytes written and an error, if any.
+// Unless Seek or Read is called first, Write overwrites any existing file (truncate mode). Otherwise, it edits the file
+// in place.
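+//
+// A minimal sketch of the truncate-vs-edit behavior described above
+// (the path is hypothetical; errors ignored for brevity):
+//
+//    f, _ := fs.NewFile("", "/tmp/file.txt")
+//    _, _ = f.Write([]byte("replaces prior contents")) // no Read/Seek first: truncate mode
+//    _ = f.Close()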
+func (f *File) Write(p []byte) (int, error) { + // if the file has not yet been opened for writing, set the writeMode and readWriteSeeker + if f.writeMode == none { + if f.readCalled || f.seekCalled { + // file has been read or seeked first, so we are in edit mode + f.writeMode = edit + f.readWriteSeeker = NewReadWriteSeekerWithData(f.memFile.contents) + _, err := f.readWriteSeeker.Seek(int64(f.cursor), 0) + if err != nil { + return 0, utils.WrapWriteError(err) + } + } else { + // file has not been read or seeked first, so we are in truncate(overwrite) mode + f.readWriteSeeker = NewReadWriteSeeker() + f.writeMode = truncate + } + f.isOpen = true + } + + // write to file buffer (writes aren't committed to the filesystem file until Close is called) + written, err := f.readWriteSeeker.Write(p) + if err != nil { + return 0, utils.WrapWriteError(err) + } + + // update the file's cursor + f.cursor += written + + return written, nil +} + +// String implements the io.Stringer interface. It returns a string representation of the file's URI +func (f *File) String() string { + return f.URI() +} + +// Exists returns whether a file exists. Creating a file does not guarantee its existence, but creating one and writing +// to it does. +func (f *File) Exists() (bool, error) { + // does it exist on the map? + vol := f.Location().Volume() + fullPath := f.Path() + loc := f.Location().(*Location) + mapRef := loc.fileSystem.fsMap + if _, ok := mapRef[vol]; ok { + if object, ok2 := mapRef[vol][fullPath]; ok2 { + if object != nil && object.i.(*memFile).exists { + return true, nil + } + } + } + return false, nil +} + +// Location simply returns the file's underlying location struct pointer +func (f *File) Location() vfs.Location { + // copy the location + location := *f.memFile.location + return &location +} + +// CopyToLocation copies the current file to the given location. If file exists +// at given location contents are simply overwritten using "CopyToFile", otherwise +// a newFile is made, takes the contents of the current file, and ends up at +// the given location +func (f *File) CopyToLocation(location vfs.Location) (vfs.File, error) { + if ok, err := f.Exists(); !ok { + if err != nil { + return nil, err + } + return nil, fs.ErrNotExist + } + testPath := path.Join(path.Clean(location.Path()), f.Name()) + thisLoc := f.Location().(*Location) + mapRef := thisLoc.fileSystem.fsMap + vol := thisLoc.Volume() + // making sure that this volume has keys at all + if _, ok := mapRef[vol]; ok { + // if file w/name exists @ loc, simply copy contents over + if _, ok2 := mapRef[vol][testPath]; ok2 { + // casting fsObject to a file + memFile := mapRef[vol][testPath].i.(*memFile) + file := deepCopy(memFile) + + cerr := f.CopyToFile(file) + + if cerr != nil { + return nil, cerr + } + return file, nil + } + } // end outer-if + + newFile, err := location.NewFile(f.Name()) + if err != nil { + return nil, err + } + + if err := f.CopyToFile(newFile); err != nil { + return nil, err + } + + return newFile, nil +} + +// CopyToFile copies the receiver file into the target file. Additionally, +// after this is called, f's cursor will reset as if it had been closed. 
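+//
+// For example (a sketch; paths hypothetical, errors ignored):
+//
+//    src, _ := fs.NewFile("", "/src.txt")
+//    dst, _ := fs.NewFile("", "/dst.txt")
+//    _ = src.CopyToFile(dst) // afterward both files are closed and cursors reset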
+func (f *File) CopyToFile(target vfs.File) (err error) {
+    if f == nil || target == nil {
+        return nilReference()
+    }
+    // Close file (f) reader regardless of an error
+    defer func() {
+        // close writer
+        wErr := target.Close()
+        // close reader
+        rErr := f.Close()
+        if err == nil {
+            if wErr != nil {
+                err = wErr
+            } else if rErr != nil {
+                err = rErr
+            }
+        }
+    }()
+
+    if exists, err := f.Exists(); !exists {
+        if err != nil {
+            return err
+        }
+        return fs.ErrNotExist
+    }
+
+    // validate seek is at 0,0 before doing copy
+    if err := backend.ValidateCopySeekPosition(f); err != nil {
+        return err
+    }
+
+    if target.Location().FileSystem().Scheme() == "mem" {
+        target.(*File).memFile.contents = make([]byte, 0)
+    }
+
+    if _, err := target.Write(f.memFile.contents); err != nil {
+        return err
+    }
+    cerr := target.Close()
+    if cerr != nil {
+        return cerr
+    }
+    return err
+}
+
+// MoveToLocation moves the receiver file to the passed in location. It does so by
+// creating a copy of 'f' in "location". 'f' is subsequently deleted.
+func (f *File) MoveToLocation(location vfs.Location) (vfs.File, error) {
+    if f == nil || location == nil {
+        return nil, nilReference()
+    }
+
+    if exists, err := f.Exists(); !exists {
+        if err != nil {
+            return nil, err
+        }
+        return nil, fs.ErrNotExist
+    }
+
+    // if the underlying FileSystem is in-memory, then this is the native way of
+    // replacing a file with the same name as "f" at the location
+    if location.FileSystem().Scheme() == "mem" {
+        // this is a potential path to a file that can be fed into the objMap portion of fsMap
+        testPath := path.Join(location.Path(), f.Name())
+        loc := location.(*Location)
+        // mapRef just makes it easier to refer to "loc.fileSystem.fsMap"
+        mapRef := loc.fileSystem.fsMap
+        vol := loc.Volume()
+        f.memFile.location.FileSystem().(*FileSystem).mu.Lock()
+        // this checks if the specified volume has any keys
+        if _, ok := mapRef[vol]; ok {
+            // this block checks if the file already exists at location; if it does, deletes it and inserts the file we have
+            if _, ok2 := mapRef[vol][testPath]; ok2 {
+                memFile := mapRef[vol][testPath].i.(*memFile)
+                f.memFile.location.FileSystem().(*FileSystem).mu.Unlock()
+                file := deepCopy(memFile)
+                err := f.CopyToFile(file)
+                if err != nil {
+                    return nil, err
+                }
+
+                err = f.Delete()
+                if err != nil {
+                    return nil, err
+                }
+
+                return file, nil
+            }
+        }
+        f.memFile.location.FileSystem().(*FileSystem).mu.Unlock()
+    }
+    // if the file doesn't yet exist at the location, create it there
+    newFile, err := location.NewFile(f.Name())
+    if err != nil {
+        return nil, err
+    }
+    // copying over the data
+    err = f.MoveToFile(newFile)
+    if err != nil {
+        return nil, err
+    }
+    return newFile, nil
+}
+
+// MoveToFile creates a newFile, and moves it to "file".
+// The receiver is always deleted (since it's being "moved").
+func (f *File) MoveToFile(file vfs.File) error {
+    if f == nil {
+        return nilReference()
+    }
+
+    if exists, err := f.Exists(); !exists {
+        if err != nil {
+            return err
+        }
+        return fs.ErrNotExist
+    }
+    if err := f.CopyToFile(file); err != nil {
+        return err
+    }
+
+    return f.Delete()
+}
+
+// Delete removes the file from the FileSystem. Sets its path in the fsMap to nil,
+// and also nils the file's members.
+func (f *File) Delete(_ ...options.DeleteOption) error {
+    if ex, _ := f.Exists(); !ex {
+        return fs.ErrNotExist
+    }
+    f.memFile.Lock()
+    defer f.memFile.Unlock()
+    loc := f.Location().(*Location)
+    mapRef := loc.fileSystem.fsMap
+    f.memFile.location.FileSystem().(*FileSystem).mu.Lock()
+    defer f.memFile.location.FileSystem().(*FileSystem).mu.Unlock()
+    // if there are keys at this volume
+    if _, ok := mapRef[loc.Volume()]; ok {
+        // checking for the object that should contain the file at this key
+        if thisObj, ok2 := mapRef[loc.Volume()][f.Path()]; ok2 {
+            str := f.Path()
+            // cast the object's "i" interface to a *memFile and mark it as no longer existing
+            file := thisObj.i.(*memFile)
+            file.exists = false
+            thisObj.i = nil
+            // delete the key so the file truly no longer lives on this system
+            delete(mapRef[loc.Volume()], str)
+        }
+    }
+
+    return nil
+}
+
+func newMemFile(file *File, location *Location) *memFile {
+    return &memFile{
+        contents: make([]byte, 0),
+        location: location,
+        name:     file.name,
+        filepath: path.Join(location.Path(), file.Name()),
+    }
+}
+
+// LastModified simply returns the file's lastModified, if the file exists.
+func (f *File) LastModified() (*time.Time, error) {
+    if exists, err := f.Exists(); !exists {
+        if err != nil {
+            return nil, err
+        }
+        return nil, fs.ErrNotExist
+    }
+    return &f.memFile.lastModified, nil
+}
+
+// Size returns the size of the file contents, i.e., the length of the file's byte slice.
+func (f *File) Size() (uint64, error) {
+    if exists, err := f.Exists(); !exists {
+        if err != nil {
+            return 0, err
+        }
+        return 0, fs.ErrNotExist
+    }
+
+    // in case the file contents have changed
+    f.readWriteSeeker = NewReadWriteSeekerWithData(f.memFile.contents)
+
+    return uint64(len(f.readWriteSeeker.Bytes())), nil
+}
+
+// Touch takes an in-memory vfs.File, makes it exist, and updates its lastModified.
+func (f *File) Touch() error {
+    if f.memFile.exists {
+        f.memFile.lastModified = time.Now()
+
+        return nil
+    }
+    f.memFile.exists = true
+
+    volume := f.Location().Volume()
+    f.memFile.lastModified = time.Now()
+    // files and locations are contained in objects of type "fsObject".
+    // An fsObject has a blank interface and a boolean that indicates whether or not it is a file
+    fileObject := &fsObject{
+        true,
+        f.memFile,
+    }
+    locObject := &fsObject{
+        false,
+        f.Location(),
+    }
+
+    f.memFile.location.FileSystem().(*FileSystem).mu.Lock()
+    defer f.memFile.location.FileSystem().(*FileSystem).mu.Unlock()
+    // just a less clunky way of accessing the fsMap
+    mapRef := f.Location().FileSystem().(*FileSystem).fsMap
+    // if the objMap does not exist for the volume yet, then we go ahead and create it.
+ if _, ok := mapRef[volume]; !ok { + mapRef[volume] = make(objMap) + } + + // setting the map at Volume volume and path of f to this fileObject + mapRef[volume][f.Path()] = fileObject + f.memFile = mapRef[volume][f.Path()].i.(*memFile) + locationPath := utils.EnsureTrailingSlash(path.Clean(path.Dir(f.Path()))) + // checking for locations that exist to avoid redundancy + if _, ok := mapRef[volume][locationPath]; !ok { + mapRef[volume][locationPath] = locObject + } + return nil +} + +// Path returns the absolute path to the file +func (f *File) Path() string { + return path.Join(f.Location().Path(), f.name) +} + +// Name returns the basename of the file +func (f *File) Name() string { + return f.name +} + +// URI returns the file's URI, if it exists +func (f *File) URI() string { + return utils.GetFileURI(f) +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/mem/fileSystem.go b/vendor/github.com/c2fo/vfs/v6/backend/mem/fileSystem.go new file mode 100644 index 000000000..e39f8ddd8 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/mem/fileSystem.go @@ -0,0 +1,181 @@ +package mem + +import ( + "path" + "sync" + + "github.com/c2fo/vfs/v6" + "github.com/c2fo/vfs/v6/backend" + "github.com/c2fo/vfs/v6/utils" +) + +// Scheme defines the FileSystem type's underlying implementation. +const Scheme = "mem" +const name = "In-Memory Filesystem" + +type fsObject struct { + isFile bool + i interface{} +} +type objMap map[string]*fsObject + +// FileSystem implements vfs.FileSystem for an in-memory file system. +type FileSystem struct { + mu sync.Mutex + fsMap map[string]objMap +} + +// Retry will return a retrier provided via options, or a no-op if none is provided. +func (fs *FileSystem) Retry() vfs.Retry { + return vfs.DefaultRetryer() +} + +// NewFile function returns the in-memory implementation of vfs.File. +// Since this is inside FileSystem, we assume that the caller knows that the CWD is the root. +// If a non-absolute path is given, an error is thrown. Additionally, a file does not +// technically exist until a call to "Touch()" is made on it. The "Touch" call links the +// file with FileSystem's map and brings it into existence. +// If a file is written to before a touch call, Write() will take care of that call. This is +// true for other functions as well and existence only poses a problem in the context of deletion +// or copying FROM a non-existent file. +func (fs *FileSystem) NewFile(volume, absFilePath string) (vfs.File, error) { + + err := utils.ValidateAbsoluteFilePath(absFilePath) + if err != nil { + return nil, err + } + + location, err := fs.NewLocation(volume, utils.EnsureTrailingSlash(path.Dir(absFilePath))) + if err != nil { + return nil, err + } + + fs.mu.Lock() + defer fs.mu.Unlock() + if _, ok := fs.fsMap[volume]; ok { + for _, obj := range fs.fsMap[volume] { + if obj.isFile && obj.i.(*memFile).location.Path() == location.Path() { + if obj.i.(*memFile).name == path.Base(absFilePath) { + vfsFile := &File{ + name: obj.i.(*memFile).name, + memFile: obj.i.(*memFile), + readWriteSeeker: NewReadWriteSeekerWithData(obj.i.(*memFile).contents), + } + return vfsFile, nil + } + } + } + } + // validateAbsFile path will throw an error if there was a trailing slash, hence not calling path.Clean() + file := &File{ + name: path.Base(absFilePath), + } + + memFile := newMemFile(file, location.(*Location)) + file.memFile = memFile + return file, nil +} + +// NewLocation function returns the in-memory implementation of vfs.Location. +// A location always exists. 
If a file is created on a location that has not yet
+// been made in the fsMap, then the location will be created with the file
+func (fs *FileSystem) NewLocation(volume, absLocPath string) (vfs.Location, error) {
+    err := utils.ValidateAbsoluteLocationPath(absLocPath)
+    if err != nil {
+        return nil, err
+    }
+    str := utils.EnsureTrailingSlash(path.Clean(absLocPath))
+    return &Location{
+        fileSystem: fs,
+        name:       str,
+        exists:     false,
+        volume:     volume,
+    }, nil
+}
+
+// Name returns the name of the underlying FileSystem
+func (fs *FileSystem) Name() string {
+    return name
+}
+
+// Scheme returns the scheme of the underlying FileSystem
+func (fs *FileSystem) Scheme() string {
+    return Scheme
+}
+
+// NewFileSystem is used to initialize the file system struct for an in-memory FileSystem.
+func NewFileSystem() *FileSystem {
+    return &FileSystem{
+        sync.Mutex{},
+        make(map[string]objMap),
+    }
+}
+
+func init() {
+    // register the in-memory file system backend under its scheme
+    backend.Register(Scheme, NewFileSystem())
+}
+
+// getKeys is used to get a list of absolute paths on a specified volume. These paths are a mixture of files and locations
+func (o objMap) getKeys() []string {
+    keyList := make([]string, 0)
+    for i := range o {
+        keyList = append(keyList, i)
+    }
+    return keyList
+}
+
+// filesHere returns a list of file pointers found at the absolute location path provided.
+// If none are there, returns an empty slice
+func (o objMap) filesHere(absLocPath string) []*memFile {
+    paths := o.getKeys()
+    fileList := make([]*memFile, 0)
+    for i := range paths {
+        object := o[paths[i]] // retrieve the object
+        if ok := object != nil && object.isFile; ok { // if the object is a file, cast its interface, i, to a file and append to the slice
+            file := object.i.(*memFile)
+            if file.location.Path() == absLocPath {
+                fileList = append(fileList, file)
+            }
+        }
+    }
+    return fileList
+}
+
+// fileNamesHere returns a list of base names of files found at the absolute location path provided.
+// If none are there, returns an empty slice
+func (o objMap) fileNamesHere(absLocPath string) []string {
+    paths := o.getKeys()
+    fileList := make([]string, 0)
+    for i := range paths {
+        object := o[paths[i]] // retrieve the object
+        if object != nil && object.isFile { // if the object is a file, cast its interface, i, to a file and append the name to the slice
+            file := object.i.(*memFile)
+            if utils.EnsureTrailingSlash(file.location.Path()) == absLocPath {
+                fileList = append(fileList, file.name)
+            }
+        }
+    }
+    return fileList
+}
+
+// deepCopy returns a new *File wrapping the given memFile, with a fresh readWriteSeeker over its contents
+func deepCopy(srcFile *memFile) vfs.File {
+    return &File{
+        name:            srcFile.name,
+        memFile:         srcFile,
+        readWriteSeeker: NewReadWriteSeekerWithData(srcFile.contents),
+    }
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/mem/location.go b/vendor/github.com/c2fo/vfs/v6/backend/mem/location.go
new file mode 100644
index 000000000..551bed732
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/mem/location.go
@@ -0,0 +1,232 @@
+package mem
+
+import (
+    "errors"
+    "fmt"
+    "os"
+    "path"
+    "regexp"
+    "sort"
+    "strings"
+
+    "github.com/c2fo/vfs/v6"
+    "github.com/c2fo/vfs/v6/options"
+    "github.com/c2fo/vfs/v6/utils"
+)
+
+// Location implements the vfs.Location interface specific to in-memory FileSystem.
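+//
+// A Location is normally obtained from the file system rather than constructed
+// directly, e.g. (a sketch; the path is hypothetical and the error is ignored):
+//
+//    fs := NewFileSystem()
+//    loc, _ := fs.NewLocation("", "/some/dir/")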
+type Location struct { + exists bool + name string // the path that this location exists on + fileSystem *FileSystem + volume string +} + +// String implements io.Stringer by returning the location's URI as a string +func (l *Location) String() string { + + return l.URI() +} + +// List finds all of the files living at the current location and returns them in a slice of strings. +// If there are no files at location, then an empty slice will be returned +func (l *Location) List() ([]string, error) { + + locPath := l.Path() + // setting mapRef to this value for code readability + mapRef := l.fileSystem.fsMap + // are there paths on this volume? + if _, ok := mapRef[l.Volume()]; ok { + // getting a list of the file names on this location + list := mapRef[l.Volume()].fileNamesHere(locPath) + // fileNamesHere() returns an empty list if no files were found + return list, nil + } + // if the volume has nothing on it, return an empty list as well + return make([]string, 0), nil +} + +// ListByPrefix tags a prefix onto the current path and in a slice, +// returns all file base names whose full paths contain that substring +// Returns empty slice if nothing found +func (l *Location) ListByPrefix(prefix string) ([]string, error) { + + list := make([]string, 0) + str := path.Join(l.Path(), prefix) + mapRef := l.fileSystem.fsMap + if _, ok := mapRef[l.volume]; ok { + paths := mapRef[l.volume].getKeys() + for i := range paths { + if strings.Contains(paths[i], str) { + if path.Ext(paths[i]) != "" && strings.Contains(str, utils.EnsureTrailingSlash(path.Dir(paths[i]))) { + list = append(list, path.Base(paths[i])) + sort.Strings(list) + } + } + } + } + + return list, nil +} + +// ListByRegex takes a regular expression and returns +// a slice of strings containing the base names of files +// found that matched the regular expression. Returns an +// empty slice upon nothing found +func (l *Location) ListByRegex(regex *regexp.Regexp) ([]string, error) { + + list := make([]string, 0) + str := l.Path() + mapRef := l.fileSystem.fsMap + if _, ok := mapRef[l.Volume()]; ok { + namesHere := mapRef[l.Volume()].fileNamesHere(str) + for i := range namesHere { + if regex.MatchString(namesHere[i]) { + list = append(list, namesHere[i]) + } + } + } + return list, nil +} + +// Volume returns the volume of the current FileSystem. 
+func (l *Location) Volume() string { + return l.volume +} + +// Path returns the full, absolute path of the location with leading and trailing slashes +func (l *Location) Path() string { + + // just to be sure that we return a trailing and leading slash + str := utils.EnsureTrailingSlash(path.Clean(l.name)) + return utils.EnsureLeadingSlash(str) + +} + +// Exists always returns true on locations +func (l *Location) Exists() (bool, error) { + + l.exists = true + return true, nil +} + +// NewLocation creates a new location at the +// given relative path, which is tagged onto the current locations absolute path +func (l *Location) NewLocation(relLocPath string) (vfs.Location, error) { + err := utils.ValidateRelativeLocationPath(relLocPath) + if err != nil { + return nil, err + } + str := path.Join(l.Path(), relLocPath) + str = utils.EnsureTrailingSlash(path.Clean(str)) + mapRef := l.fileSystem.fsMap + // if the location already exists on the map, just return that one + if object, ok := mapRef[l.volume]; ok { + paths := object.getKeys() + for _, potentialPath := range paths { + if ok := potentialPath == str; ok { + return mapRef[l.volume][potentialPath].i.(*Location), nil + } + } + + } + return &Location{ + fileSystem: l.fileSystem, + name: str, + exists: false, + volume: l.Volume(), + }, nil + +} + +// ChangeDir simply changes the directory of the location +func (l *Location) ChangeDir(relLocPath string) error { + err := utils.ValidateRelativeLocationPath(relLocPath) + if err != nil { + return err + } + l.name = path.Join(l.name, relLocPath) + return nil + +} + +// FileSystem returns the type of file system location exists on, if it exists at all +func (l *Location) FileSystem() vfs.FileSystem { + + return l.fileSystem + +} + +// NewFile creates a vfs.File given its relative path and tags it onto "l's" path +func (l *Location) NewFile(relFilePath string) (vfs.File, error) { + + if relFilePath == "" { + return nil, errors.New("cannot use empty name for file") + } + err := utils.ValidateRelativeFilePath(relFilePath) + if err != nil { + return nil, err + } + + // after validating the path, we check to see if the + // file already exists. 
if it does, return a reference to it + mapRef := l.fileSystem.fsMap + relativeLocationPath := utils.EnsureTrailingSlash(path.Dir(path.Join(l.Path(), relFilePath))) + if _, ok := mapRef[l.volume]; ok { + fileList := mapRef[l.volume].filesHere(relativeLocationPath) + for _, file := range fileList { + if file.name == path.Base(relFilePath) { + fileCopy := deepCopy(file) + return fileCopy, nil + } + } + } + + // since the file didn't already exist, we will create a + // location and the file (NewLocation takes care of duplicates) + pref := l.Path() + str := relFilePath + nameStr := path.Join(pref, str) + + file := &File{ + name: path.Base(nameStr), + } + newLoc := *l + newLoc.name = relativeLocationPath + file.memFile = newMemFile(file, &newLoc) + return file, nil +} + +// DeleteFile locates the file given the fileName and calls delete on it +func (l *Location) DeleteFile(relFilePath string, _ ...options.DeleteOption) error { + err := utils.ValidateRelativeFilePath(relFilePath) + if err != nil { + return err + } + vol := l.Volume() + fullPath := path.Join(l.Path(), relFilePath) + + l.fileSystem.mu.Lock() + defer l.fileSystem.mu.Unlock() + mapRef := l.fileSystem.fsMap + if _, ok := mapRef[vol]; ok { + if thisObj, ok2 := mapRef[vol][fullPath]; ok2 { + file := thisObj.i.(*memFile) + file.exists = false + file = nil + thisObj.i = nil + thisObj = nil + mapRef[vol][fullPath] = nil // setting that key to nil so it truly no longer lives on this system + delete(mapRef[vol], fullPath) + return nil + } + } + + return fmt.Errorf("unable to delete file: %w", os.ErrNotExist) +} + +// URI returns the URI of the location if the location exists +func (l *Location) URI() string { + + return utils.GetLocationURI(l) +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/mem/readwriteseeker.go b/vendor/github.com/c2fo/vfs/v6/backend/mem/readwriteseeker.go new file mode 100644 index 000000000..173472406 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/mem/readwriteseeker.go @@ -0,0 +1,81 @@ +package mem + +import ( + "io" + + "github.com/c2fo/vfs/v6" +) + +// ReadWriteSeeker is a custom type that implements io.ReadWriteSeeker. +type ReadWriteSeeker struct { + data []byte + cursor int +} + +// NewReadWriteSeeker creates a new ReadWriteSeeker. +func NewReadWriteSeeker() *ReadWriteSeeker { + return &ReadWriteSeeker{ + data: []byte{}, + cursor: 0, + } +} + +// NewReadWriteSeekerWithData creates a new ReadWriteSeeker with the provided data. +func NewReadWriteSeekerWithData(data []byte) *ReadWriteSeeker { + return &ReadWriteSeeker{ + data: data, + cursor: 0, + } +} + +// Write writes data to the current cursor position and advances the cursor. +func (rws *ReadWriteSeeker) Write(p []byte) (n int, err error) { + position := rws.cursor + len(p) + if position <= len(rws.data) { + copy(rws.data[rws.cursor:position], p) + } else { + rws.data = append(rws.data[:rws.cursor], p...) + } + rws.cursor = position + return len(p), nil +} + +// Read reads data from the current cursor position and advances the cursor. +func (rws *ReadWriteSeeker) Read(p []byte) (n int, err error) { + if rws.cursor >= len(rws.data) { + return 0, io.EOF + } + + n = copy(p, rws.data[rws.cursor:]) + rws.cursor += n + return n, nil +} + +// Seek sets the cursor position. 
+func (rws *ReadWriteSeeker) Seek(offset int64, whence int) (int64, error) {
+    var position int64
+    switch whence {
+    case io.SeekStart:
+        position = offset
+    case io.SeekCurrent:
+        position = int64(rws.cursor) + offset
+    case io.SeekEnd:
+        position = int64(len(rws.data)) + offset
+    default:
+        return 0, vfs.ErrSeekInvalidWhence
+    }
+
+    if position < 0 || position > int64(len(rws.data)) {
+        return 0, vfs.ErrSeekInvalidOffset
+    }
+
+    rws.cursor = int(position)
+
+    return position, nil
+}
+
+// Bytes returns a byte slice of the data.
+func (rws *ReadWriteSeeker) Bytes() []byte {
+    return rws.data
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/os/doc.go b/vendor/github.com/c2fo/vfs/v6/backend/os/doc.go
new file mode 100644
index 000000000..e0e9f649b
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/os/doc.go
@@ -0,0 +1,31 @@
+/*
+Package os built-in os lib VFS implementation.
+
+# Usage
+
+Rely on github.com/c2fo/vfs/v6/backend
+
+    import(
+        "github.com/c2fo/vfs/v6/backend"
+        "github.com/c2fo/vfs/v6/backend/os"
+    )
+
+    func UseFs() error {
+        fs := backend.Backend(os.Scheme)
+        ...
+    }
+
+Or call directly:
+
+    import _os "github.com/c2fo/vfs/v6/backend/os"
+
+    func DoSomething() {
+        fs := &_os.FileSystem{}
+        ...
+    }
+
+# See Also
+
+See: https://golang.org/pkg/os/
+*/
+package os
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/os/file.go b/vendor/github.com/c2fo/vfs/v6/backend/os/file.go
new file mode 100644
index 000000000..6cc98a467
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/os/file.go
@@ -0,0 +1,497 @@
+package os
+
+import (
+    "errors"
+    "fmt"
+    "io"
+    "os"
+    "path"
+    "path/filepath"
+    "time"
+
+    "github.com/c2fo/vfs/v6"
+    "github.com/c2fo/vfs/v6/backend"
+    "github.com/c2fo/vfs/v6/options"
+    "github.com/c2fo/vfs/v6/utils"
+)
+
+const osCrossDeviceLinkError = "invalid cross-device link"
+
+type opener func(filePath string) (*os.File, error)
+
+// File implements vfs.File interface for os fs.
+type File struct {
+    file        *os.File
+    name        string
+    filesystem  *FileSystem
+    cursorPos   int64
+    tempFile    *os.File
+    useTempFile bool
+    fileOpener  opener
+    seekCalled  bool
+    readCalled  bool
+}
+
+// Delete unlinks the file returning any error or nil.
+func (f *File) Delete(_ ...options.DeleteOption) error {
+    err := os.Remove(f.Path())
+    if err == nil {
+        f.file = nil
+    }
+    return err
+}
+
+// LastModified returns the timestamp of the file's mtime or error, if any.
+func (f *File) LastModified() (*time.Time, error) {
+    stats, err := os.Stat(f.Path())
+    if err != nil {
+        return nil, err
+    }
+
+    statsTime := stats.ModTime()
+    return &statsTime, err
+}
+
+// Name returns the base name of the file path.
+//
+// For `file:///some/path/to/file.txt`, it would return `file.txt`
+func (f *File) Name() string {
+    return path.Base(f.name)
+}
+
+// Path returns the absolute path, including filename.
+// For `file:///some/path/to/file.txt`, it would return `/some/path/to/file.txt`
+//
+// If the directory portion of a file is desired, call
+//
+//    someFile.Location().Path()
+func (f *File) Path() string {
+    return filepath.Join(f.Location().Path(), f.Name())
+}
+
+// Size returns the size (in bytes) of the File or any error.
+func (f *File) Size() (uint64, error) {
+    stats, err := os.Stat(f.Path())
+    if err != nil {
+        return 0, err
+    }
+
+    return uint64(stats.Size()), err
+}
+
+// Close implements the io.Closer interface, closing the underlying *os.File. It returns an error, if any.
+func (f *File) Close() error {
+    f.useTempFile = false
+    f.cursorPos = 0
+    f.seekCalled = false
+    f.readCalled = false
+
+    // check if temp file
+    if f.tempFile != nil {
+        // close temp (os) file
+        err := f.tempFile.Close()
+        if err != nil {
+            return utils.WrapCloseError(err)
+        }
+
+        // get original (os) file, open it if it has not been opened
+        finalFile, err := f.getInternalFile()
+        if err != nil {
+            return utils.WrapCloseError(err)
+        }
+        // rename temp file to actual file
+        err = safeOsRename(f.tempFile.Name(), finalFile.Name())
+        if err != nil && !os.IsNotExist(err) {
+            return utils.WrapCloseError(err)
+        }
+        f.tempFile = nil
+    }
+    if f.file == nil {
+        // Do nothing on files that were never referenced
+        return nil
+    }
+
+    err := f.file.Close()
+    if err != nil {
+        return utils.WrapCloseError(err)
+    }
+    f.file = nil
+    return nil
+}
+
+// Read implements the io.Reader interface. It returns the bytes read and an error, if any.
+func (f *File) Read(p []byte) (int, error) {
+    // if we have not written to this file, ensure the original file exists
+    if !f.useTempFile {
+        if exists, err := f.Exists(); err != nil {
+            return 0, utils.WrapReadError(err)
+        } else if !exists {
+            return 0, utils.WrapReadError(fmt.Errorf("failed to read. File does not exist at %s", f))
+        }
+    }
+    // get the file we need, either tempFile or original file
+    useFile, err := f.getInternalFile()
+    if err != nil {
+        return 0, utils.WrapReadError(err)
+    }
+
+    read, err := useFile.Read(p)
+    if err != nil {
+        // if we got io.EOF, we'll return the read and the EOF error
+        // because io.Copy looks for EOF to determine if it's done
+        // and doesn't support error wrapping
+        if errors.Is(err, io.EOF) {
+            return read, io.EOF
+        }
+        return read, utils.WrapReadError(err)
+    }
+
+    f.readCalled = true
+    f.cursorPos += int64(read)
+
+    return read, nil
+}
+
+// Seek implements the io.Seeker interface. It accepts an offset and "whence" where 0 means relative to the origin of
+// the file, 1 means relative to the current offset, and 2 means relative to the end. It returns the new offset and
+// an error, if any.
+func (f *File) Seek(offset int64, whence int) (int64, error) {
+    // when writing, we first write to a temp file which ensures a file isn't created before we call close.
+    // However, if we've never written AND the original file doesn't exist, we can't seek.
+    exists, err := f.Exists()
+    if err != nil {
+        return 0, utils.WrapSeekError(err)
+    }
+    if !exists && !f.useTempFile {
+        // nothing has been written and the original file doesn't exist, so there is nothing to seek on
+        return 0, utils.WrapSeekError(fmt.Errorf("failed to seek. File does not exist at %s", f))
+    }
+    useFile, err := f.getInternalFile()
+    if err != nil {
+        return 0, utils.WrapSeekError(err)
+    }
+
+    f.cursorPos, err = useFile.Seek(offset, whence)
+    if err != nil {
+        return 0, utils.WrapSeekError(err)
+    }
+
+    f.seekCalled = true
+    return f.cursorPos, err
+}
+
+// Exists returns true if the file exists on the file system, otherwise false, and an error, if any.
+func (f *File) Exists() (bool, error) {
+    _, err := os.Stat(f.Path())
+    if err != nil {
+        // file does not exist
+        if os.IsNotExist(err) {
+            return false, nil
+        }
+        // some other error
+        return false, err
+    }
+    // file exists
+    return true, nil
+}
+
+// Write implements the io.Writer interface. It accepts a slice of bytes and returns the number of bytes written and an error, if any.
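+//
+// Note that writes land in a temp file first and are only promoted to the final
+// path on Close; a sketch (the path is hypothetical, errors ignored for brevity):
+//
+//    f, _ := (&FileSystem{}).NewFile("", "/tmp/example.txt")
+//    _, _ = f.Write([]byte("hello"))
+//    _ = f.Close() // temp file is renamed onto /tmp/example.txt here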
+func (f *File) Write(p []byte) (n int, err error) { + // useTempFile prevents the immediate update of the file until we Close() + f.useTempFile = true + + useFile, err := f.getInternalFile() + if err != nil { + return 0, utils.WrapWriteError(err) + } + write, err := useFile.Write(p) + if err != nil { + return 0, utils.WrapWriteError(err) + } + offset := int64(write) + f.cursorPos += offset + + return write, err +} + +// Location returns the underlying os.Location. +func (f *File) Location() vfs.Location { + return &Location{ + fileSystem: f.filesystem, + name: utils.EnsureTrailingSlash(path.Dir(f.name)), + } +} + +// MoveToFile move a file. It accepts a target vfs.File and returns an error, if any. +func (f *File) MoveToFile(file vfs.File) error { + // validate seek is at 0,0 before doing copy + if err := backend.ValidateCopySeekPosition(f); err != nil { + return err + } + // handle native os move/rename + if file.Location().FileSystem().Scheme() == Scheme { + return safeOsRename(f.Path(), file.Path()) + } + + // do copy/delete move for non-native os moves + if _, err := f.copyWithName(file.Name(), file.Location()); err != nil { + return err + } + return f.Delete() +} + +// safeOsRename will attempt to do an os.Rename. If error is "invalid cross-device link" (where one OS file is on a +// different device/volume than the other), then fall back to doing a copy-delete. +func safeOsRename(srcName, dstName string) error { + err := os.Rename(srcName, dstName) + if err != nil { + e, ok := err.(*os.LinkError) + if ok && e.Err.Error() == osCrossDeviceLinkError { + // do cross-device renaming + if err := osCopy(srcName, dstName); err != nil { + return err + } + // delete original file + return os.Remove(srcName) + } + // return non-CrossDeviceLink error + return err + } + return nil +} + +// osCopy just io.Copy's the os files +func osCopy(srcName, dstName string) error { + // setup os reader + srcReader, err := os.Open(srcName) //nolint:gosec + if err != nil { + return err + } + defer func() { _ = srcReader.Close() }() + + // setup os writer + dstWriter, err := os.Create(dstName) //nolint:gosec + if err != nil { + return err + } + defer func() { _ = dstWriter.Close() }() + + // copy os files. Note that os.Create always does a "touch" (creates an empty file before writing data) so no need to + // do a TouchCopy like we do with other filesystems. + buffer := make([]byte, utils.TouchCopyMinBufferSize) + _, err = io.CopyBuffer(dstWriter, srcReader, buffer) + return err +} + +// MoveToLocation moves a file to a new Location. It accepts a target vfs.Location and returns a vfs.File and an error, if any. +func (f *File) MoveToLocation(location vfs.Location) (vfs.File, error) { + if location.FileSystem().Scheme() == Scheme { + if err := ensureDir(location); err != nil { + return nil, err + } + } + + // do a MoveToFile call (delegating native rename vs copy/delete logic) + file, err := location.NewFile(f.Name()) + if err != nil { + return nil, err + } + err = f.MoveToFile(file) + if err != nil { + return nil, err + } + + // return vfs.File for newly moved file + return location.NewFile(f.Name()) +} + +// CopyToFile copies the file to a new File. It accepts a vfs.File and returns an error, if any. 
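+//
+// Usage sketch (dstLocation may be on any vfs backend; names are assumed, errors ignored):
+//
+//    dst, _ := dstLocation.NewFile("copy.txt")
+//    _ = f.CopyToFile(dst)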
+func (f *File) CopyToFile(file vfs.File) error { + // validate seek is at 0,0 before doing copy + if err := backend.ValidateCopySeekPosition(f); err != nil { + return err + } + _, err := f.copyWithName(file.Name(), file.Location()) + return err +} + +// CopyToLocation copies existing File to new Location with the same name. +// It accepts a vfs.Location and returns a vfs.File and error, if any. +func (f *File) CopyToLocation(location vfs.Location) (vfs.File, error) { + // validate seek is at 0,0 before doing copy + if err := backend.ValidateCopySeekPosition(f); err != nil { + return nil, err + } + return f.copyWithName(f.Name(), location) +} + +// URI returns the File's URI as a string. +func (f *File) URI() string { + return utils.GetFileURI(f) +} + +// String implement fmt.Stringer, returning the file's URI as the default string. +func (f *File) String() string { + return f.URI() +} + +// Touch creates a zero-length file on the vfs.File if no File exists. Update File's last modified timestamp. +// Returns error if unable to touch File. +func (f *File) Touch() error { + exists, err := f.Exists() + if err != nil { + return err + } + + if !exists { + file, err := f.openFile() + if err != nil { + return err + } + f.file = file + return f.Close() + } + now := time.Now() + return os.Chtimes(f.Path(), now, now) +} + +func (f *File) copyWithName(name string, location vfs.Location) (vfs.File, error) { + newFile, err := location.FileSystem().NewFile(location.Volume(), path.Join(location.Path(), name)) + if err != nil { + return nil, err + } + + if err := utils.TouchCopyBuffered(newFile, f, utils.TouchCopyMinBufferSize); err != nil { + return nil, err + } + err = f.Close() + if err != nil { + return nil, err + } + + err = newFile.Close() + if err != nil { + return nil, err + } + return newFile, nil +} + +func (f *File) openFile() (*os.File, error) { + if f.file != nil { + return f.file, nil + } + + // replace default file opener, is set in struct + openFunc := openOSFile + if f.fileOpener != nil { + openFunc = f.fileOpener + } + + file, err := openFunc(f.Path()) + if err != nil { + return nil, err + } + f.file = file + + return file, nil +} + +func openOSFile(filePath string) (*os.File, error) { + + // Ensure the path exists before opening the file, NoOp if dir already exists. + var fileMode os.FileMode = 0666 + if err := os.MkdirAll(path.Dir(filePath), os.ModeDir|0750); err != nil { + return nil, err + } + + file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, fileMode) //nolint:gosec + return file, err +} + +func ensureDir(location vfs.Location) error { + if exists, err := location.Exists(); err != nil { + return err + } else if !exists { + if err := os.MkdirAll(location.Path(), os.ModeDir|0777); err != nil { + return err + } + } + return nil +} + +// If cursor is not (0,0) will copy original file to a temp file, +// opening its file descriptor to the current cursor position. +// If cursor is (0,0), just begin writing to new temp file. +// No need to copy original first. 
+func (f *File) getInternalFile() (*os.File, error) {
+    // this is the use case of vfs.file
+    if !f.useTempFile {
+        if f.file == nil {
+            // use the custom file opener if one was set on the struct, otherwise the default
+            openFunc := openOSFile
+            if f.fileOpener != nil {
+                openFunc = f.fileOpener
+            }
+
+            finalFile, err := openFunc(f.Path())
+            if err != nil {
+                return nil, err
+            }
+            f.file = finalFile
+        }
+        return f.file, nil
+    }
+    // this is the use case of vfs.tempFile
+    if f.tempFile == nil {
+        localTempFile, err := f.copyToLocalTempReader()
+        if err != nil {
+            return nil, err
+        }
+        f.tempFile = localTempFile
+    }
+
+    return f.tempFile, nil
+}
+
+func (f *File) copyToLocalTempReader() (*os.File, error) {
+    tmpFile, err := os.CreateTemp("", fmt.Sprintf("%s.%d", f.Name(), time.Now().UnixNano()))
+    if err != nil {
+        return nil, err
+    }
+
+    exists, err := f.Exists()
+    if err != nil {
+        return nil, err
+    }
+
+    // If file exists AND we've called Seek or Read first, any subsequent writes should edit the file (temp),
+    // so we copy the original file to the temp file then set the cursor position on the temp file to the current position.
+    // If we're opening because Write is called first, we always overwrite the file, so no need to copy the original contents.
+    //
+    // So imagine we have a file with content "hello world" and we call Seek(6, 0) and then Write([]byte("there")), the
+    // temp file should have "hello there" and not "there". Then finally when Close is called, the temp file is renamed
+    // to the original file. This code ensures that scenario works as expected.
+    if exists && (f.seekCalled || f.readCalled) {
+        openFunc := openOSFile
+        if f.fileOpener != nil {
+            openFunc = f.fileOpener
+        }
+
+        actualFile, err := openFunc(f.Path())
+        if err != nil {
+            return nil, err
+        }
+        if _, err := io.Copy(tmpFile, actualFile); err != nil {
+            return nil, err
+        }
+
+        if f.cursorPos > 0 {
+            // match cursor position in temp file
+            if _, err := tmpFile.Seek(f.cursorPos, 0); err != nil {
+                return nil, err
+            }
+        }
+    }
+
+    return tmpFile, nil
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/os/fileSystem.go b/vendor/github.com/c2fo/vfs/v6/backend/os/fileSystem.go
new file mode 100644
index 000000000..415dd8140
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/os/fileSystem.go
@@ -0,0 +1,57 @@
+package os
+
+import (
+    "path"
+
+    "github.com/c2fo/vfs/v6"
+    "github.com/c2fo/vfs/v6/backend"
+    "github.com/c2fo/vfs/v6/utils"
+)
+
+// Scheme defines the file system type.
+const Scheme = "file"
+const name = "os"
+
+// FileSystem implements vfs.FileSystem for the OS file system.
+type FileSystem struct{}
+
+// Retry will return a retrier provided via options, or a no-op if none is provided.
+func (fs *FileSystem) Retry() vfs.Retry {
+    return vfs.DefaultRetryer()
+}
+
+// NewFile function returns the os implementation of vfs.File.
+func (fs *FileSystem) NewFile(volume, name string) (vfs.File, error) {
+    err := utils.ValidateAbsoluteFilePath(name)
+    if err != nil {
+        return nil, err
+    }
+    return &File{name: name, filesystem: fs}, nil
+}
+
+// NewLocation function returns the os implementation of vfs.Location.
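+//
+// For example (a sketch; note the volume argument is ignored by this implementation):
+//
+//    loc, err := fs.NewLocation("", "/var/data/")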
+func (fs *FileSystem) NewLocation(volume, name string) (vfs.Location, error) {
+    err := utils.ValidateAbsoluteLocationPath(name)
+    if err != nil {
+        return nil, err
+    }
+
+    return &Location{
+        fileSystem: fs,
+        name:       utils.EnsureTrailingSlash(path.Clean(name)),
+    }, nil
+}
+
+// Name returns "os"
+func (fs *FileSystem) Name() string {
+    return name
+}
+
+// Scheme returns "file" as the initial part of a file URI, i.e.: file://
+func (fs *FileSystem) Scheme() string {
+    return Scheme
+}
+
+func init() {
+    backend.Register(Scheme, &FileSystem{})
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/os/location.go b/vendor/github.com/c2fo/vfs/v6/backend/os/location.go
new file mode 100644
index 000000000..5c82eb44a
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/os/location.go
@@ -0,0 +1,188 @@
+package os
+
+import (
+    "errors"
+    "os"
+    "path"
+    "path/filepath"
+    "regexp"
+    "strings"
+
+    "github.com/c2fo/vfs/v6"
+    "github.com/c2fo/vfs/v6/options"
+    "github.com/c2fo/vfs/v6/utils"
+)
+
+// Location implements the vfs.Location interface specific to OS fs.
+type Location struct {
+    name       string
+    fileSystem vfs.FileSystem
+}
+
+// NewFile uses the properties of the calling location to generate a vfs.File (backed by an os.File). A string
+// argument is expected to be a relative path to the location's current path.
+func (l *Location) NewFile(fileName string) (vfs.File, error) {
+    if l == nil {
+        return nil, errors.New("non-nil os.Location pointer is required")
+    }
+    if fileName == "" {
+        return nil, errors.New("non-empty string filePath is required")
+    }
+    err := utils.ValidateRelativeFilePath(fileName)
+    if err != nil {
+        return nil, err
+    }
+    fileName = utils.EnsureLeadingSlash(path.Clean(path.Join(l.name, fileName)))
+    return l.fileSystem.NewFile(l.Volume(), fileName)
+}
+
+// DeleteFile deletes the file of the given name at the location. This is meant to be a shortcut for instantiating a
+// new file and calling delete on it, sparing the caller the error-handling overhead.
+func (l *Location) DeleteFile(fileName string, opts ...options.DeleteOption) error {
+    file, err := l.NewFile(fileName)
+    if err != nil {
+        return err
+    }
+
+    return file.Delete(opts...)
+}
+
+type fileTest func(fileName string) bool
+
+// List returns a slice of all files in the top directory of the location.
+func (l *Location) List() ([]string, error) {
+    return l.fileList(func(name string) bool { return true })
+}
+
+// ListByPrefix returns a slice of all files starting with "prefix" in the top directory of the location.
+func (l *Location) ListByPrefix(prefix string) ([]string, error) {
+    var loc vfs.Location
+    var err error
+    d := path.Dir(prefix)
+
+    // if prefix has a dir component, use its location and the basename of prefix
+    if d != "." && d != "/" {
+        loc, err = l.NewLocation(utils.EnsureTrailingSlash(d))
+        if err != nil {
+            return []string{}, err
+        }
+        prefix = path.Base(prefix)
+    } else {
+        // otherwise just use everything as-is
+        loc = l
+    }
+
+    return loc.(*Location).fileList(func(name string) bool {
+        return strings.HasPrefix(name, prefix)
+    })
+}
+
+// ListByRegex returns a slice of all files matching the regex in the top directory of the location.
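+//
+// For example, to list files whose names end in ".txt" at the location (a sketch):
+//
+//    txts, err := loc.ListByRegex(regexp.MustCompile(`\.txt$`))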
+func (l *Location) ListByRegex(regex *regexp.Regexp) ([]string, error) { + return l.fileList(func(name string) bool { + return regex.MatchString(name) + }) +} + +func (l *Location) fileList(testEval fileTest) ([]string, error) { + files := make([]string, 0) + exists, err := l.Exists() + if err != nil { + return files, err + } + + // Function should return an empty slice if the directory doesn't exist. This is to match behavior of remote + // systems. If the user cares about the distinction between directories that are empty, vs non-existent then + // Location.Exists() should be used first. + if exists { + entries, err := os.ReadDir(l.Path()) + if err != nil { + return files, err + } + + for _, info := range entries { + if !info.IsDir() && testEval(info.Name()) { + files = append(files, info.Name()) + } + } + } + + return files, nil +} + +// Volume returns the volume, if any, of the location. Given "C:\foo\bar" it returns "C:" on Windows. On other platforms it returns "". +func (l *Location) Volume() string { + return filepath.VolumeName(l.name) +} + +// Path returns the location path. +func (l *Location) Path() string { + return utils.EnsureLeadingSlash(utils.EnsureTrailingSlash(l.name)) +} + +// Exists returns true if the location exists, and the calling user has the appropriate +// permissions. Will receive false without an error if the location simply doesn't exist. Otherwise could receive +// false and any errors passed back from the OS. +func (l *Location) Exists() (bool, error) { + _, err := os.Stat(l.Path()) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + return true, nil +} + +// URI returns the Location's URI as a string. +func (l *Location) URI() string { + return utils.GetLocationURI(l) +} + +// String implement fmt.Stringer, returning the location's URI as the default string. +func (l *Location) String() string { + return l.URI() +} + +// NewLocation makes a copy of the underlying Location, then modifies its path by calling ChangeDir with the +// relativePath argument, returning the resulting location. The only possible errors come from the call to +// ChangeDir. +func (l *Location) NewLocation(relativePath string) (vfs.Location, error) { + if l == nil { + return nil, errors.New("non-nil os.Location pointer is required") + } + + // make a copy of the original location first, then ChangeDir, leaving the original location as-is + newLocation := &Location{} + *newLocation = *l + err := newLocation.ChangeDir(relativePath) + if err != nil { + return nil, err + } + return newLocation, nil +} + +// ChangeDir takes a relative path, and modifies the underlying Location's path. The caller is modified by this +// so the only return is any error. For this implementation there are no errors. +func (l *Location) ChangeDir(relativePath string) error { + if l == nil { + return errors.New("non-nil os.Location pointer is required") + } + if relativePath == "" { + return errors.New("non-empty string relativePath is required") + } + err := utils.ValidateRelativeLocationPath(relativePath) + if err != nil { + return err + } + + // update location path + l.name = utils.EnsureTrailingSlash(utils.EnsureLeadingSlash(path.Join(l.name, relativePath))) + + return nil +} + +// FileSystem returns a vfs.FileSystem interface of the location's underlying file system. 
+func (l *Location) FileSystem() vfs.FileSystem { + return l.fileSystem +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/s3/doc.go b/vendor/github.com/c2fo/vfs/v6/backend/s3/doc.go new file mode 100644 index 000000000..9460ca0c0 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/s3/doc.go @@ -0,0 +1,92 @@ +/* +Package s3 AWS S3 VFS implementation. + +# Usage + +Rely on github.com/c2fo/vfs/v6/backend + + import( + "github.com/c2fo/vfs/v6/backend" + "github.com/c2fo/vfs/v6/backend/s3" + ) + + func UseFs() error { + fs := backend.Backend(s3.Scheme) + ... + } + +Or call directly: + + import "github.com/c2fo/vfs/v6/backend/s3" + + func DoSomething() { + fs := s3.NewFileSystem() + ... + } + +s3 can be augmented with the following implementation-specific methods. Backend returns vfs.FileSystem interface so it +would have to be cast as s3.FileSystem to use the following: + + func DoSomething() { + + ... + + // cast if fs was created using backend.Backend(). Not necessary if created directly from s3.NewFileSystem(). + fs = fs.(s3.FileSystem) + + // to pass in client options + fs = fs.WithOptions( + s3.Options{ + AccessKeyID: "AKIAIOSFODNN7EXAMPLE", + SecretAccessKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + Region: "us-west-2", + ACL: "bucket-owner-full-control", + }, + ) + + // to pass specific client, for instance a mock client + s3apiMock := &mocks.S3API{} + s3apiMock.On("GetObject", mock.AnythingOfType("*s3.GetObjectInput")). + Return(&s3.GetObjectOutput{ + Body: nopCloser{bytes.NewBufferString("Hello world!")}, + }, nil) + fs = fs.WithClient(s3apiMock) + } + +# Object ACL + +Canned ACL's can be passed in as an Option. This string will be applied to all writes, moves, and copies. +See https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl for values. + +# Authentication + +Authentication, by default, occurs automatically when Client() is called. It looks for credentials in the following places, +preferring the first location found: + + 1. StaticProvider - set of credentials which are set programmatically, and will never expire. + + 2. EnvProvider - credentials from the environment variables of the + running process. Environment credentials never expire. + Environment variables used: + + * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY + * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY + + 3. SharedCredentialsProvider - looks for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + env value is empty will default to current user's home directory. + + * Linux/OSX: "$HOME/.aws/credentials" + * Windows: "%USERPROFILE%\.aws\credentials" + + 4. RemoteCredProvider - default remote endpoints such as EC2 or ECS IAM Roles + + 5. 
EC2RoleProvider - credentials from the EC2 service, and keeps track if those credentials are expired + +See the following for more auth info: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html +and https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html + +# See Also + +See: https://github.com/aws/aws-sdk-go/tree/master/service/s3 +*/ +package s3 diff --git a/vendor/github.com/c2fo/vfs/v6/backend/s3/file.go b/vendor/github.com/c2fo/vfs/v6/backend/s3/file.go new file mode 100644 index 000000000..af322f3a6 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/s3/file.go @@ -0,0 +1,860 @@ +package s3 + +import ( + "context" + "errors" + "fmt" + "io" + "net/url" + "os" + "path" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" + "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface" + + "github.com/c2fo/vfs/v6" + "github.com/c2fo/vfs/v6/mocks" + "github.com/c2fo/vfs/v6/options" + "github.com/c2fo/vfs/v6/options/delete" + "github.com/c2fo/vfs/v6/utils" +) + +const defaultPartitionSize = int64(32 * 1024 * 1024) + +// File implements vfs.File interface for S3 fs. +type File struct { + fileSystem *FileSystem + bucket string + key string + + // seek-related fields + cursorPos int64 + seekCalled bool + + // read-related fields + reader io.ReadCloser + readCalled bool + readEOFSeen bool + + // write-related fields + tempFileWriter *os.File + s3Writer *io.PipeWriter + cancelFunc context.CancelFunc + writeCalled bool + s3WriterCompleteCh chan struct{} +} + +// Info Functions + +// LastModified returns the LastModified property of a HEAD request to the s3 object. +func (f *File) LastModified() (*time.Time, error) { + head, err := f.getHeadObject() + if err != nil { + return nil, err + } + return head.LastModified, nil +} + +// Name returns the name portion of the file's key property. IE: "file.txt" of "s3://some/path/to/file.txt +func (f *File) Name() string { + return path.Base(f.key) +} + +// Path return the directory portion of the file's key. IE: "path/to" of "s3://some/path/to/file.txt +func (f *File) Path() string { + return utils.EnsureLeadingSlash(f.key) +} + +// Exists returns whether (boolean) the object exists on s3, based on a call for +// the object's HEAD through the s3 API. +func (f *File) Exists() (bool, error) { + _, err := f.getHeadObject() + if err != nil { + if errors.Is(err, vfs.ErrNotExist) { + return false, nil + } + return false, err + } + + return true, nil +} + +// Size returns the ContentLength value from an S3 HEAD request on the file's object. +func (f *File) Size() (uint64, error) { + head, err := f.getHeadObject() + if err != nil { + return 0, err + } + return uint64(*head.ContentLength), nil +} + +// Location returns a vfs.Location at the location of the object. IE: if file is at +// s3://bucket/here/is/the/file.txt the location points to s3://bucket/here/is/the/ +func (f *File) Location() vfs.Location { + return vfs.Location(&Location{ + fileSystem: f.fileSystem, + prefix: path.Dir(f.key), + bucket: f.bucket, + }) +} + +// Move/Copy Operations + +// CopyToFile puts the contents of File into the targetFile passed. Uses the S3 CopyObject +// method if the target file is also on S3, otherwise uses io.CopyBuffer. 
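+//
+// A sketch (names hypothetical): when the target is also an s3-backed file, the
+// server-side CopyObject path is used; otherwise contents are streamed:
+//
+//    dst, _ := s3Location.NewFile("backup/file.txt")
+//    err := srcFile.CopyToFile(dst)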
+func (f *File) CopyToFile(file vfs.File) (err error) { + // Close file (f) reader regardless of an error + defer func() { + // close writer + wErr := file.Close() + // close reader + rErr := f.Close() + // + if err == nil { + if wErr != nil { + err = wErr + } else if rErr != nil { + err = rErr + } + } + }() + // validate seek is at 0,0 before doing copy + if f.cursorPos != 0 { + return vfs.CopyToNotPossible + } + + // if target is S3 + if tf, ok := file.(*File); ok { + input, err := f.getCopyObjectInput(tf) + if err != nil { + return err + } + // if input is not nil, use it to natively copy object + if input != nil { + client, err := f.fileSystem.Client() + if err != nil { + return err + } + _, err = client.CopyObject(input) + return err + } + } + + // Otherwise, use TouchCopyBuffered using io.CopyBuffer + fileBufferSize := 0 + + if opts, ok := f.Location().FileSystem().(*FileSystem).options.(Options); ok { + fileBufferSize = opts.FileBufferSize + } + + if err := utils.TouchCopyBuffered(file, f, fileBufferSize); err != nil { + return err + } + // Close target to flush and ensure that cursor isn't at the end of the file when the caller reopens for read + if err := file.Close(); err != nil { + return err + } + + return err +} + +// MoveToFile puts the contents of File into the targetFile passed using File.CopyToFile. +// If the copy succeeds, the source file is deleted. Any errors from the copy or delete are +// returned. +func (f *File) MoveToFile(file vfs.File) error { + if err := f.CopyToFile(file); err != nil { + return err + } + + return f.Delete() +} + +// MoveToLocation works by first calling File.CopyToLocation(vfs.Location) then, if that +// succeeds, it deletes the original file, returning the new file. If the copy process fails +// the error is returned, and the Delete isn't called. If the call to Delete fails, the error +// and the file generated by the copy are both returned. +func (f *File) MoveToLocation(location vfs.Location) (vfs.File, error) { + newFile, err := f.CopyToLocation(location) + if err != nil { + return nil, err + } + delErr := f.Delete() + return newFile, delErr +} + +// CopyToLocation creates a copy of *File, using the file's current name as the new file's +// name at the given location. If the given location is also s3, the AWS API for copying +// files will be utilized, otherwise, standard io.Copy will be done to the new file. +func (f *File) CopyToLocation(location vfs.Location) (vfs.File, error) { + newFile, err := location.NewFile(f.Name()) + if err != nil { + return nil, err + } + + return newFile, f.CopyToFile(newFile) +} + +// CRUD Operations + +// Delete clears any local temp file, or write buffer from read/writes to the file, then makes +// a DeleteObject call to s3 for the file. If DeleteAllVersions option is provided, +// DeleteObject call is made to s3 for each version of the file. Returns any error returned by the API. 
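+// +// A minimal sketch, assuming a hypothetical versioned bucket and assuming delete.DeleteAllVersions +// is the zero-value struct option matched in the type switch below: +// +//	err := file.Delete(delete.DeleteAllVersions{}) 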
+func (f *File) Delete(opts ...options.DeleteOption) error { + if err := f.Close(); err != nil { + return err + } + + client, err := f.fileSystem.Client() + if err != nil { + return err + } + + var deleteAllVersions bool + for _, o := range opts { + switch o.(type) { + case delete.DeleteAllVersions: + deleteAllVersions = true + default: + } + } + + _, err = client.DeleteObject(&s3.DeleteObjectInput{ + Key: &f.key, + Bucket: &f.bucket, + }) + if err != nil { + return err + } + + if deleteAllVersions { + objectVersions, err := f.getAllObjectVersions(client) + if err != nil { + return err + } + + for _, version := range objectVersions.Versions { + if _, err = client.DeleteObject(&s3.DeleteObjectInput{ + Key: &f.key, + Bucket: &f.bucket, + VersionId: version.VersionId, + }); err != nil { + return err + } + } + } + + return err +} + +// Close cleans up underlying mechanisms for reading from and writing to the file. Closes and removes the +// local temp file, and triggers an upload to S3 of any data buffered by prior writes. +func (f *File) Close() error { //nolint:gocyclo + defer func() { + f.reader = nil + f.cancelFunc = nil + f.s3Writer = nil + + // reset state + f.cursorPos = 0 + f.seekCalled = false + f.readCalled = false + f.writeCalled = false + f.readEOFSeen = false + }() + + // cleanup reader (unless reader is also the writer tempfile) + if f.reader != nil && !f.writeCalled { + // close reader + if err := f.reader.Close(); err != nil { + return utils.WrapCloseError(err) + } + } + + // finalize writer + wroteFile := false + if f.s3Writer != nil { + // close s3Writer + if err := f.s3Writer.Close(); err != nil { + return utils.WrapCloseError(err) + } + wroteFile = true + } else if f.tempFileWriter != nil { // s3Writer is nil but tempFileWriter is not nil (seek after write, write after seek) + // write tempFileWriter to s3 + if err := f.tempToS3(); err != nil { + return utils.WrapCloseError(err) + } + wroteFile = true + } + + // cleanup tempFileWriter + if f.tempFileWriter != nil { + if err := f.cleanupTempFile(); err != nil { + return utils.WrapCloseError(err) + } + } + + // wait for file to exist + if wroteFile { + // read s3WriterCompleteCh if it exists + if f.writeCalled && f.s3Writer != nil && f.s3WriterCompleteCh != nil { + // wait for s3Writer to complete + <-f.s3WriterCompleteCh + // close s3WriterCompleteCh channel + close(f.s3WriterCompleteCh) + } + err := waitUntilFileExists(f, 5) + if err != nil { + return utils.WrapCloseError(err) + } + } + + // close reader + if f.reader != nil && !f.writeCalled { + err := f.reader.Close() + if err != nil { + return utils.WrapCloseError(err) + } + } + + return nil +} + +func (f *File) tempToS3() error { + // ensure cursor is at 0 + if _, err := f.tempFileWriter.Seek(0, 0); err != nil { + return err + } + + // write tempFileWriter to s3 + client, err := f.fileSystem.Client() + if err != nil { + return err + } + + uploader := getUploader(client, withUploadPartitionSize(f.getUploadPartitionSize())) + uploadInput := uploadInput(f) + uploadInput.Body = f.tempFileWriter + + _, err = uploader.UploadWithContext(context.Background(), uploadInput) + if err != nil { + return err + } + + return nil +} + +// Read implements the standard for io.Reader. 
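+// +// Because File satisfies io.Reader, standard library helpers compose directly with it. A minimal +// sketch (hypothetical file; error handling elided): +// +//	data, _ := io.ReadAll(file) // reads from the current cursor position to EOF 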
+func (f *File) Read(p []byte) (n int, err error) { + // check/initialize for reader + r, err := f.getReader() + if err != nil { + return 0, utils.WrapReadError(err) + } + + read, err := r.Read(p) + if err != nil { + if !errors.Is(err, io.EOF) { + return 0, utils.WrapReadError(err) + } + // s3 reader returns io.EOF when reading the last byte (but not past the last byte) to save on bandwidth, + // but we want to return io.EOF only when reading past the last byte + if f.readEOFSeen { + return 0, io.EOF + } + sz, err := f.Size() + if err != nil { + return 0, utils.WrapReadError(err) + } + if f.cursorPos+int64(read) > int64(sz) { + return read, utils.WrapReadError(err) + } + f.readEOFSeen = true + } + + f.cursorPos += int64(read) + f.readCalled = true + + return read, nil +} + +func (f *File) cleanupTempFile() error { + if f.tempFileWriter != nil { + err := f.tempFileWriter.Close() + if err != nil { + return err + } + + err = os.Remove(f.tempFileWriter.Name()) + if err != nil { + return err + } + + f.tempFileWriter = nil + } + + return nil +} + +// Seek implements the standard for io.Seeker. +func (f *File) Seek(offset int64, whence int) (int64, error) { + // get length of file + var length uint64 + if f.writeCalled { + // if write has been called, then the length is the cursorPos + length = uint64(f.cursorPos) + } else { + var err error + length, err = f.Size() + if err != nil { + return 0, utils.WrapSeekError(err) + } + } + + // invalidate reader (if any) + if f.reader != nil { + err := f.reader.Close() + if err != nil { + return 0, utils.WrapSeekError(err) + } + + f.reader = nil + } + + // invalidate s3Writer + if f.s3Writer != nil { + // cancel s3Writer + f.cancelFunc() + f.cancelFunc = nil + + // close s3Writer + err := f.s3Writer.Close() + if err != nil { + return 0, utils.WrapSeekError(err) + } + + f.s3Writer = nil + } + + // update seek position for tempFileWriter writer (if any) + if f.tempFileWriter != nil { + // seek tempFileWriter + _, err := f.tempFileWriter.Seek(offset, whence) + if err != nil { + return 0, utils.WrapSeekError(err) + } + } + + // update cursorPos + pos, err := utils.SeekTo(int64(length), f.cursorPos, offset, whence) + if err != nil { + return 0, utils.WrapSeekError(err) + } + f.cursorPos = pos + + f.seekCalled = true + return f.cursorPos, nil +} + +// Write implements the standard for io.Writer. Note that writes are not committed to S3 until Close() is called. +func (f *File) Write(data []byte) (int, error) { + // check/initialize for writer + err := f.initWriters() + if err != nil { + return 0, utils.WrapWriteError(err) + } + + // write to tempfile + written, err := f.tempFileWriter.Write(data) + if err != nil { + return 0, utils.WrapWriteError(err) + } + + // write to s3 + if f.s3Writer != nil { + // write to s3 + s3written, err := f.s3Writer.Write(data) + if err != nil { + return 0, utils.WrapWriteError(err) + } + + // ensure both writes are the same + if written != s3written { + return 0, utils.WrapWriteError( + fmt.Errorf("local write and s3 write are different sizes: local=%d, s3=%d", written, s3written), + ) + } + } + + // update cursorPos + f.cursorPos += int64(written) + f.writeCalled = true + + return written, nil +} + +// Touch creates a zero-length file on the vfs.File if no File exists. Otherwise, it updates File's last modified timestamp. +// Returns error if unable to touch File. 
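+// +// A minimal sketch (hypothetical bucket and key; error handling elided): +// +//	f, _ := s3.NewFileSystem().NewFile("my-bucket", "/path/file.txt") +//	err := f.Touch() // creates a zero-length object, or updates LastModified if it already exists 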
+func (f *File) Touch() error { + // check if file exists + exists, err := f.Exists() + if err != nil { + return err + } + + // file doesn't already exist so create it + if !exists { + _, err = f.Write([]byte("")) + if err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + } else { + // file already exists so update its last modified date + return utils.UpdateLastModifiedByMoving(f) + } + + return nil +} + +// URI returns the File's URI as a string. +func (f *File) URI() string { + return utils.GetFileURI(f) +} + +// String implements fmt.Stringer, returning the file's URI as the default string. +func (f *File) String() string { + return f.URI() +} + +/* +Private helper functions +*/ +func (f *File) getAllObjectVersions(client s3iface.S3API) (*s3.ListObjectVersionsOutput, error) { + prefix := utils.RemoveLeadingSlash(f.key) + objVers, err := client.ListObjectVersions(&s3.ListObjectVersionsInput{ + Bucket: &f.bucket, + Prefix: &prefix, + }) + return objVers, err +} + +func (f *File) getHeadObject() (*s3.HeadObjectOutput, error) { + headObjectInput := new(s3.HeadObjectInput).SetKey(f.key).SetBucket(f.bucket) + client, err := f.fileSystem.Client() + if err != nil { + return nil, err + } + + head, err := client.HeadObject(headObjectInput) + + return head, handleExistsError(err) +} + +// For copy from S3-to-S3 when credentials are the same between source and target, return *s3.CopyObjectInput or error +func (f *File) getCopyObjectInput(targetFile *File) (*s3.CopyObjectInput, error) { + // first we must determine if we're using the same s3 credentials for source and target before doing a native copy + isSameAccount := false + var ACL string + + fileOptions := f.Location().FileSystem().(*FileSystem).options + targetOptions := targetFile.Location().FileSystem().(*FileSystem).options + + if fileOptions == nil && targetOptions == nil { + // if both opts are nil, we must be using the default credentials + isSameAccount = true + } else { + opts, hasOptions := fileOptions.(Options) + targetOpts, hasTargetOptions := targetOptions.(Options) + if hasOptions { + // use source ACL (even if empty), UNLESS target ACL is set + ACL = opts.ACL + if hasTargetOptions && targetOpts.ACL != "" { + ACL = targetOpts.ACL + } + if hasTargetOptions { + // since accesskey and session token are mutually exclusive, one will be nil + // if both are the same, we're using the same credentials + isSameAccount = (opts.AccessKeyID == targetOpts.AccessKeyID) && (opts.SessionToken == targetOpts.SessionToken) + } + } + } + + // If both files use the same account, copy with native library. Otherwise, copy to disk + // first before pushing out to the target file's location. + if isSameAccount { + // PathEscape ensures we url-encode as required by the API, including double-encoding literals + copySourceKey := url.PathEscape(path.Join(f.bucket, f.key)) + + copyInput := new(s3.CopyObjectInput). + SetServerSideEncryption("AES256"). + SetACL(ACL). + SetKey(targetFile.key). + SetBucket(targetFile.bucket). 
+ SetCopySource(copySourceKey) + + if f.fileSystem.options != nil && f.fileSystem.options.(Options).DisableServerSideEncryption { + copyInput.ServerSideEncryption = nil + } + + // validate copyInput + if err := copyInput.Validate(); err != nil { + return nil, err + } + + return copyInput, nil + } + + // return nil if credentials aren't the same + return nil, nil +} + +func (f *File) copyS3ToLocalTempReader(tmpFile *os.File) error { + client, err := f.fileSystem.Client() + if err != nil { + return err + } + + // Download file + input := new(s3.GetObjectInput).SetBucket(f.bucket).SetKey(f.key) + opt := withDownloadPartitionSize(f.getDownloadPartitionSize()) + _, err = getDownloader(client, opt). + DownloadWithContext(context.Background(), tmpFile, input) + + return err +} + +// TODO: need to provide an implementation-agnostic container for providing config options such as SSE +func uploadInput(f *File) *s3manager.UploadInput { + sseType := "AES256" + input := &s3manager.UploadInput{ + Bucket: &f.bucket, + Key: &f.key, + ServerSideEncryption: &sseType, + } + + if f.fileSystem.options == nil { + f.fileSystem.options = Options{} + } + + if f.fileSystem.options.(Options).DisableServerSideEncryption { + input.ServerSideEncryption = nil + } + + if opts, ok := f.fileSystem.options.(Options); ok { + if opts.ACL != "" { + input.ACL = &opts.ACL + } + } + + return input +} + +// waitUntilFileExists attempts to ensure that a recently written file is available before moving on. This is helpful for +// attempting to overcome race conditions with S3's "eventual consistency". +// waitUntilFileExists accepts vfs.File and an int representing the number of times to retry (once a second). +// error is returned if the file is still not available after the specified retries. +// nil is returned once the file is available. +func waitUntilFileExists(file vfs.File, retries int) error { + // Ignore in-memory VFS files + if _, ok := file.(*mocks.ReadWriteFile); ok { + return nil + } + + // Return as if file was found when retries is set to -1. Useful mainly for testing. + if retries == -1 { + return nil + } + var retryCount = 0 + for { + if retryCount == retries { + return fmt.Errorf("failed to find file %s after %d retries", file, retries) + } + + // check for existing file + found, err := file.Exists() + if err != nil { + return fmt.Errorf("unable to perform S3 exists on file %s: %s", file, err.Error()) + } + + if found { + break + } + + retryCount++ + time.Sleep(time.Second * 1) + } + + return nil +} + +func (f *File) getReader() (io.ReadCloser, error) { + if f.reader == nil { + if f.writeCalled && f.tempFileWriter != nil { + // we've edited or truncated the file, so we need to read from the temp file which should already be at the + // current cursor position + f.reader = f.tempFileWriter + } else { + sz, err := f.Size() + if err != nil { + return nil, err + } + if sz == 0 { + // can't set range on empty file, so just return an empty ReadCloser + f.reader = io.NopCloser(strings.NewReader("")) + } else { + + // Create the request to get the object + input := new(s3.GetObjectInput). + SetBucket(f.bucket). + SetKey(f.key). 
+ SetRange(fmt.Sprintf("bytes=%d-", f.cursorPos)) + + // Get the client + client, err := f.fileSystem.Client() + if err != nil { + return nil, err + } + + // Request the object + result, err := client.GetObject(input) + if err != nil { + return nil, err + } + + // Set the reader to the body of the object + f.reader = result.Body + } + } + } + return f.reader, nil +} + +func handleExistsError(err error) error { + if err != nil { + var awsErr awserr.Error + if errors.As(err, &awsErr) { + switch awsErr.Code() { + case s3.ErrCodeNoSuchKey, s3.ErrCodeNoSuchBucket, "NotFound": + return vfs.ErrNotExist + } + } + return err + } + return nil +} + +func (f *File) initWriters() error { + if f.tempFileWriter == nil { + // Create temp file + tmpFile, err := os.CreateTemp("", fmt.Sprintf("vfs_s3_%s.%d", f.Name(), time.Now().UnixNano())) + if err != nil { + return err + } + f.tempFileWriter = tmpFile + if f.cursorPos != 0 { + // if file exists (because cursor position is non-zero), we need to copy the existing s3 file to temp + err := f.copyS3ToLocalTempReader(tmpFile) + if err != nil { + return err + } + + // seek to cursorPos + if _, err := f.tempFileWriter.Seek(f.cursorPos, 0); err != nil { + return err + } + } + } + + // if we haven't seeked or read yet, we need to get the s3Writer + if f.s3Writer == nil { + if !f.seekCalled && !f.readCalled { + w, err := f.getS3Writer() + if err != nil { + return err + } + + // set the s3 pipe writer + f.s3Writer = w + } + } + + return nil +} + +func (f *File) getS3Writer() (*io.PipeWriter, error) { + f.s3WriterCompleteCh = make(chan struct{}, 1) + pr, pw := io.Pipe() + + client, err := f.fileSystem.Client() + if err != nil { + return nil, err + } + uploader := getUploader(client, withUploadPartitionSize(f.getUploadPartitionSize())) + ctx, cancel := context.WithCancel(context.Background()) + f.cancelFunc = cancel + uploadInput := uploadInput(f) + uploadInput.Body = pr + + go func(input *s3manager.UploadInput) { + defer cancel() + _, err := uploader.UploadWithContext(ctx, input) + if err != nil { + _ = pw.CloseWithError(err) + } + f.s3WriterCompleteCh <- struct{}{} + }(uploadInput) + + return pw, nil +} + +func (f *File) getUploadPartitionSize() int64 { + partSize := defaultPartitionSize + if f.fileSystem.options != nil { + if opts, ok := f.fileSystem.options.(Options); ok { + if opts.UploadPartitionSize != 0 { + partSize = opts.UploadPartitionSize + } + } + } + return partSize +} + +func (f *File) getDownloadPartitionSize() int64 { + partSize := defaultPartitionSize + if f.fileSystem.options != nil { + if opts, ok := f.fileSystem.options.(Options); ok { + if opts.DownloadPartitionSize != 0 { + partSize = opts.DownloadPartitionSize + } + } + } + return partSize +} + +func withDownloadPartitionSize(partSize int64) func(*s3manager.Downloader) { + return func(d *s3manager.Downloader) { + d.PartSize = partSize + } +} + +func withUploadPartitionSize(partSize int64) func(*s3manager.Uploader) { + return func(u *s3manager.Uploader) { + u.PartSize = partSize + } +} + +var getDownloader = func(client s3iface.S3API, opts ...func(d *s3manager.Downloader)) s3manageriface.DownloaderAPI { + return s3manager.NewDownloaderWithClient(client, opts...) +} + +var getUploader = func(client s3iface.S3API, opts ...func(d *s3manager.Uploader)) s3manageriface.UploaderAPI { + return s3manager.NewUploaderWithClient(client, opts...) 
+} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/s3/fileSystem.go b/vendor/github.com/c2fo/vfs/v6/backend/s3/fileSystem.go new file mode 100644 index 000000000..da0120d53 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/s3/fileSystem.go @@ -0,0 +1,130 @@ +package s3 + +import ( + "errors" + "fmt" + "path" + + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" + + "github.com/c2fo/vfs/v6" + "github.com/c2fo/vfs/v6/backend" + "github.com/c2fo/vfs/v6/utils" +) + +// Scheme defines the file system type. +const Scheme = "s3" +const name = "AWS S3" + +// FileSystem implements vfs.FileSystem for the S3 file system. +type FileSystem struct { + client s3iface.S3API + options vfs.Options +} + +// Retry will return the default no-op retrier. The S3 client provides its own retryer interface, and is available +// to override via the s3.FileSystem Options type. +func (fs *FileSystem) Retry() vfs.Retry { + return vfs.DefaultRetryer() +} + +// NewFile function returns the s3 implementation of vfs.File. +func (fs *FileSystem) NewFile(volume, name string) (vfs.File, error) { + if fs == nil { + return nil, errors.New("non-nil s3.FileSystem pointer is required") + } + if volume == "" || name == "" { + return nil, errors.New("non-empty strings for bucket and key are required") + } + if err := utils.ValidateAbsoluteFilePath(name); err != nil { + return nil, err + } + + return &File{ + fileSystem: fs, + bucket: utils.RemoveTrailingSlash(volume), + key: path.Clean(name), + }, nil +} + +// NewLocation function returns the s3 implementation of vfs.Location. +func (fs *FileSystem) NewLocation(volume, name string) (vfs.Location, error) { + if fs == nil { + return nil, errors.New("non-nil s3.FileSystem pointer is required") + } + if volume == "" || name == "" { + return nil, errors.New("non-empty strings for bucket and key are required") + } + if err := utils.ValidateAbsoluteLocationPath(name); err != nil { + return nil, err + } + + return &Location{ + fileSystem: fs, + prefix: utils.EnsureTrailingSlash(path.Clean(name)), + bucket: utils.RemoveTrailingSlash(volume), + }, nil +} + +// Name returns "AWS S3" +func (fs *FileSystem) Name() string { + return name +} + +// Scheme returns "s3" as the initial part of a file URI ie: s3:// +func (fs *FileSystem) Scheme() string { + return Scheme +} + +// Client returns the underlying aws s3 client, creating it, if necessary. +// See Overview for authentication resolution. +func (fs *FileSystem) Client() (s3iface.S3API, error) { + if fs.client == nil { + if fs.options == nil { + fs.options = Options{} + } + + if opts, ok := fs.options.(Options); ok { + var err error + fs.client, err = getClient(opts) + if err != nil { + return nil, err + } + } else { + return nil, fmt.Errorf("unable to create client, vfs.Options must be an s3.Options") + } + } + return fs.client, nil +} + +// WithOptions sets options for client and returns the file system (chainable) +func (fs *FileSystem) WithOptions(opts vfs.Options) *FileSystem { + // only set options if vfs.Options is s3.Options + if opts, ok := opts.(Options); ok { + fs.options = opts + // we set client to nil to ensure that a new client is created using the new context when Client() is called + fs.client = nil + } + return fs +} + +// WithClient passes in an s3 client and returns the file system (chainable) +func (fs *FileSystem) WithClient(client interface{}) *FileSystem { + switch client.(type) { + case *s3.S3, s3iface.S3API: + fs.client = client.(s3iface.S3API) + fs.options = nil + } + 
return fs +} + +// NewFileSystem is an initializer for the FileSystem struct; it returns a new, empty FileSystem. +func NewFileSystem() *FileSystem { + return &FileSystem{} +} + +func init() { + // registers a default FileSystem + backend.Register(Scheme, NewFileSystem()) +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/s3/location.go b/vendor/github.com/c2fo/vfs/v6/backend/s3/location.go new file mode 100644 index 000000000..730eb56cd --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/s3/location.go @@ -0,0 +1,212 @@ +package s3 + +import ( + "errors" + "path" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/c2fo/vfs/v6" + "github.com/c2fo/vfs/v6/options" + "github.com/c2fo/vfs/v6/utils" +) + +// Location implements the vfs.Location interface specific to S3 fs. +type Location struct { + fileSystem *FileSystem + prefix string + bucket string +} + +// List calls the s3 API to list all objects in the location's bucket, with a prefix automatically +// set to the location's path. This will make a call to the s3 API for every 1000 keys returned. +// If you have many thousands of keys at the given location, this could become quite expensive. +func (l *Location) List() ([]string, error) { + prefix := utils.RemoveLeadingSlash(l.prefix) + listObjectsInput := l.getListObjectsInput().SetPrefix(utils.EnsureTrailingSlash(prefix)) + return l.fullLocationList(listObjectsInput, prefix) +} + +// ListByPrefix calls the s3 API with the location's prefix modified relatively by the prefix arg passed to the +// function. The resource considerations of List() apply to this function as well. +func (l *Location) ListByPrefix(prefix string) ([]string, error) { + searchPrefix := utils.RemoveLeadingSlash(path.Join(l.prefix, prefix)) + d := path.Dir(searchPrefix) + listObjectsInput := l.getListObjectsInput().SetPrefix(searchPrefix) + return l.fullLocationList(listObjectsInput, d) +} + +// ListByRegex retrieves the keys of all the files at the location's current path, then filters out all those +// that don't match the given regex. The resource considerations of List() apply here as well. +func (l *Location) ListByRegex(regex *regexp.Regexp) ([]string, error) { + keys, err := l.List() + if err != nil { + return []string{}, err + } + + var filteredKeys []string + for _, key := range keys { + if regex.MatchString(key) { + filteredKeys = append(filteredKeys, key) + } + } + return filteredKeys, nil +} + +// Volume returns the bucket the location is contained in. +func (l *Location) Volume() string { + return l.bucket +} + +// Path returns the prefix the location references in most s3 calls. +func (l *Location) Path() string { + return utils.EnsureLeadingSlash(utils.EnsureTrailingSlash(l.prefix)) +} + +// Exists returns true if the bucket exists, and the user in the underlying s3.fileSystem.Client() has the appropriate +// permissions. The caller will receive false without an error if the bucket simply doesn't exist; otherwise the caller +// may receive false along with any error passed back from the API. 
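+// +// A minimal sketch (hypothetical bucket; error handling elided): +// +//	loc, _ := s3.NewFileSystem().NewLocation("my-bucket", "/some/prefix/") +//	ok, err := loc.Exists() // ok is false with a nil err when the bucket is absent 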
+func (l *Location) Exists() (bool, error) { + headBucketInput := new(s3.HeadBucketInput).SetBucket(l.bucket) + client, err := l.fileSystem.Client() + if err != nil { + return false, err + } + _, err = client.HeadBucket(headBucketInput) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == s3.ErrCodeNoSuchBucket { + return false, nil + } + return false, err + } + + return true, nil +} + +// NewLocation makes a copy of the underlying Location, then modifies its path by calling ChangeDir with the +// relativePath argument, returning the resulting location. The only possible errors come from the call to +// ChangeDir. +func (l *Location) NewLocation(relativePath string) (vfs.Location, error) { + if l == nil { + return nil, errors.New("non-nil s3.Location pointer is required") + } + + // make a copy of the original location first, then ChangeDir, leaving the original location as-is + newLocation := &Location{} + *newLocation = *l + err := newLocation.ChangeDir(relativePath) + if err != nil { + return nil, err + } + return newLocation, nil +} + +// ChangeDir takes a relative path, and modifies the underlying Location's path. The receiver is modified by this +// call, so the only return value is any error, such as an empty or invalid relativePath. +func (l *Location) ChangeDir(relativePath string) error { + if l == nil { + return errors.New("non-nil s3.Location pointer is required") + } + if relativePath == "" { + return errors.New("non-empty string relativePath is required") + } + err := utils.ValidateRelativeLocationPath(relativePath) + if err != nil { + return err + } + l.prefix = utils.EnsureLeadingSlash(utils.EnsureTrailingSlash(path.Join(l.prefix, relativePath))) + return nil +} + +// NewFile uses the properties of the calling location to generate a vfs.File (backed by an s3.File). The filePath +// argument is expected to be a relative path to the location's current path. +func (l *Location) NewFile(filePath string) (vfs.File, error) { + if l == nil { + return nil, errors.New("non-nil s3.Location pointer is required") + } + if filePath == "" { + return nil, errors.New("non-empty string filePath is required") + } + err := utils.ValidateRelativeFilePath(filePath) + if err != nil { + return nil, err + } + newFile := &File{ + fileSystem: l.fileSystem, + bucket: l.bucket, + key: utils.EnsureLeadingSlash(path.Join(l.prefix, filePath)), + } + return newFile, nil +} + +// DeleteFile removes the file at fileName path. +func (l *Location) DeleteFile(fileName string, opts ...options.DeleteOption) error { + file, err := l.NewFile(fileName) + if err != nil { + return err + } + + return file.Delete(opts...) +} + +// FileSystem returns a vfs.FileSystem interface of the location's underlying file system. +func (l *Location) FileSystem() vfs.FileSystem { + return l.fileSystem +} + +// URI returns the Location's URI as a string. +func (l *Location) URI() string { + return utils.GetLocationURI(l) +} + +// String implements fmt.Stringer, returning the location's URI as the default string. 
+func (l *Location) String() string { + return l.URI() +} + +/* + Private helpers +*/ + +func (l *Location) fullLocationList(input *s3.ListObjectsInput, prefix string) ([]string, error) { + var keys []string + client, err := l.fileSystem.Client() + if err != nil { + return keys, err + } + for { + listObjectsOutput, err := client.ListObjects(input) + if err != nil { + return []string{}, err + } + newKeys := getNamesFromObjectSlice(listObjectsOutput.Contents, utils.EnsureTrailingSlash(utils.RemoveLeadingSlash(prefix))) + keys = append(keys, newKeys...) + + // if s3 response "IsTruncated" we need to call List again with + // an updated Marker (s3 version of paging) + if *listObjectsOutput.IsTruncated { + input.SetMarker(*listObjectsOutput.NextMarker) + } else { + break + } + } + + return keys, nil +} + +func (l *Location) getListObjectsInput() *s3.ListObjectsInput { + return new(s3.ListObjectsInput).SetBucket(l.bucket).SetDelimiter("/") +} + +func getNamesFromObjectSlice(objects []*s3.Object, locationPrefix string) []string { + var keys []string + for _, object := range objects { + if *object.Key != locationPrefix { + keys = append(keys, strings.TrimPrefix(*object.Key, locationPrefix)) + } + } + return keys +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/s3/options.go b/vendor/github.com/c2fo/vfs/v6/backend/s3/options.go new file mode 100644 index 000000000..a5039ad67 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/s3/options.go @@ -0,0 +1,138 @@ +package s3 + +import ( + "net/http" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" +) + +// Options holds s3-specific options. Currently only client options are used. 
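+// +// A minimal sketch for targeting an S3-compatible service such as minio (hypothetical endpoint and +// credentials); path-style addressing is often required by non-AWS implementations: +// +//	fs := NewFileSystem().WithOptions(Options{ +//		AccessKeyID:     "my-access-key", +//		SecretAccessKey: "my-secret-key", +//		Region:          "us-east-1", +//		Endpoint:        "http://127.0.0.1:9000", +//		ForcePathStyle:  true, +//	}) 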
+type Options struct { + AccessKeyID string `json:"accessKeyId,omitempty"` + SecretAccessKey string `json:"secretAccessKey,omitempty"` + SessionToken string `json:"sessionToken,omitempty"` + Region string `json:"region,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + ACL string `json:"acl,omitempty"` + ForcePathStyle bool `json:"forcePathStyle,omitempty"` + DisableServerSideEncryption bool `json:"disableServerSideEncryption,omitempty"` + Retry request.Retryer + MaxRetries int + FileBufferSize int // Buffer size in bytes used with utils.TouchCopyBuffered + DownloadPartitionSize int64 // Partition size in bytes used for multipart download of large files using s3manager.Downloader + UploadPartitionSize int64 // Partition size in bytes used for multipart upload of large files using s3manager.Uploader +} + +// getClient sets up an S3 client +func getClient(opt Options) (s3iface.S3API, error) { + + // setup default config + awsConfig := defaults.Config() + + // setup region using opt or env + if opt.Region != "" { + awsConfig.WithRegion(opt.Region) + } else if val, ok := os.LookupEnv("AWS_DEFAULT_REGION"); ok { + awsConfig.WithRegion(val) + } + + // force path-style access (for minio and other S3-compatible services) + if opt.ForcePathStyle { + awsConfig.S3ForcePathStyle = &opt.ForcePathStyle + } + + // use specific endpoint; otherwise, the aws "default endpoint resolver" will be used, based on region + awsConfig.WithEndpoint(opt.Endpoint) + + if opt.Retry != nil { + awsConfig.Retryer = opt.Retry + } + + // set up credential provider chain + credentialProviders, err := initCredentialProviderChain(opt) + if err != nil { + return nil, err + } + awsConfig.WithCredentials( + credentials.NewChainCredentials(credentialProviders), + ) + + // create new session with config + s, err := session.NewSessionWithOptions( + session.Options{ + Config: *awsConfig, + }, + ) + if err != nil { + return nil, err + } + + // return client instance + return s3.New(s), nil +} + +// initCredentialProviderChain returns a slice of credential providers that will be used, in order, to attempt authentication +func initCredentialProviderChain(opt Options) ([]credentials.Provider, error) { + p := make([]credentials.Provider, 0) + + // A StaticProvider is a set of credentials which are set programmatically, + // and will never expire. + if opt.AccessKeyID != "" && opt.SecretAccessKey != "" { + // Make the auth + v := credentials.Value{ + AccessKeyID: opt.AccessKeyID, + SecretAccessKey: opt.SecretAccessKey, + SessionToken: opt.SessionToken, + } + p = append(p, &credentials.StaticProvider{Value: v}) + } + + // An EnvProvider retrieves credentials from the environment variables of the + // running process. Environment credentials never expire. + // + // Environment variables used: + // + // * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY + // + // * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY + p = append(p, &credentials.EnvProvider{}) //nolint:gocritic // appendCombine + + // Path to the shared credentials file. + // + // SharedCredentialsProvider will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty, it will default to the current user's home directory. 
+ // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + p = append(p, &credentials.SharedCredentialsProvider{}) + + lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service + + // RemoteCredProvider for default remote endpoints such as EC2 or ECS IAM Roles + def := defaults.Get() + def.Config.HTTPClient = lowTimeoutClient + p = append(p, defaults.RemoteCredProvider(*def.Config, def.Handlers)) + + // EC2RoleProvider retrieves credentials from the EC2 service, keeping track of whether those credentials have expired + sess, err := session.NewSession() + if err != nil { + return nil, err + } + p = append(p, &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(sess, &aws.Config{ + HTTPClient: lowTimeoutClient, + }), + ExpiryWindow: 3, + }) + + return p, nil +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/sftp/doc.go b/vendor/github.com/c2fo/vfs/v6/backend/sftp/doc.go new file mode 100644 index 000000000..2908c34e2 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/sftp/doc.go @@ -0,0 +1,202 @@ +/* +Package sftp SFTP VFS implementation. + +# Usage + +Rely on github.com/c2fo/vfs/v6/backend + + import( + "github.com/c2fo/vfs/v6/backend" + "github.com/c2fo/vfs/v6/backend/sftp" + ) + + func UseFs() error { + fs := backend.Backend(sftp.Scheme) + ... + } + +Or call directly: + + import "github.com/c2fo/vfs/v6/backend/sftp" + + func DoSomething() { + fs := sftp.NewFileSystem() + + location, err := fs.NewLocation("myuser@server.com:22", "/some/path/") + if err != nil { + #handle error + } + ... + } + +sftp can be augmented with some implementation-specific methods. Backend returns the vfs.FileSystem interface so it +would have to be cast as *sftp.FileSystem to use them. + +These methods are chainable: +(*FileSystem) WithClient(client interface{}) *FileSystem +(*FileSystem) WithOptions(opts vfs.Options) *FileSystem + + func DoSomething() { + + // cast if fs was created using backend.Backend(). Not necessary if created directly from sftp.NewFileSystem(). + fs := backend.Backend(sftp.Scheme) + fs = fs.(*sftp.FileSystem) + + // to pass specific client + sshClient, err := ssh.Dial("tcp", "server.com:22", &ssh.ClientConfig{ + User: "someuser", + Auth: []ssh.AuthMethod{ssh.Password("mypassword")}, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + }) + #handle error + client, err := _sftp.NewClient(sshClient) + #handle error + + fs = fs.WithClient(client) + + // to pass in client options. See Options for more info. Note that changes to Options will make nil any client. + // This behavior ensures that changes to settings will get applied to a newly created client. + fs = fs.WithOptions( + sftp.Options{ + KeyFilePath: "/home/Bob/.ssh/id_rsa", + KeyPassphrase: "s3cr3t", + KnownHostsCallback: ssh.InsecureIgnoreHostKey(), + }, + ) + + location, err := fs.NewLocation("myuser@server.com:22", "/some/path/") + #handle error + + file := location.NewFile("myfile.txt") + #handle error + + _, err = file.Write([]byte("some text")) + #handle error + + err = file.Close() + #handle error + + } + +# Authentication + +Authentication, by default, occurs automatically when Client() is called. Since user is part of the URI authority section +(Volume), auth is handled slightly differently than other vfs backends. + +A client is initialized lazily, meaning we only make a connection to the server at the last moment so we are free to modify +options until then. The authenticated session is closed any time WithOptions(), WithClient(), or Close() occurs. 
Currently, +that means that closing a file belonging to an fs will break the connection of any other open file on the same fs. + +# USERNAME + +User may only be set in the URI authority section (Volume in vfs parlance). + + scheme host + __/ ___/____ port + / \ / \ /\ + sftp://someuser@server.com:22/path/to/file.txt + \____________________/ \______________/ + \______/ \ \ + / authority section path + username (Volume) + +sftp vfs backend accepts either a password or an ssh key, with or without a passphrase. + +# PASSWORD/PASSPHRASE + +Passwords may be passed via Options.Password or via the environmental variable VFS_SFTP_PASSWORD. + +SSH keys may be passed via Options.KeyFilePath and (optionally) Options.KeyPassphrase. They can also be passed via +environmental variables VFS_SFTP_KEYFILE and VFS_SFTP_KEYFILE_PASSPHRASE, respectively. + +# KNOWN HOSTS + +Known hosts ensures that the server you're connecting to hasn't been somehow redirected to another server, collecting +your info (man-in-the-middle attack). Handling for this can be accomplished via: + 1. Options.KnownHostsString which accepts a string. + 2. Options.KnownHostsFile or environmental variable VFS_SFTP_KNOWN_HOSTS_FILE which accepts a path to a known_hosts file. + 3. Options.KnownHostsCallback which allows you to specify an ssh.HostKeyCallback function. Environmental variable + VFS_SFTP_INSECURE_KNOWN_HOSTS will set this callback function to ssh.InsecureIgnoreHostKey which may be helpful + for testing but should not be used in production. + 4. Defaults to trying to find and use $HOME/.ssh/known_hosts. + For unix, the system-wide location /etc/ssh/.ssh/known_hosts is also checked. + SSH doesn't exist natively on Windows and each third-party implementation has a different location for known_hosts. Because + of this, no attempt is made to find a system-wide file for Windows. It's better to specify KnownHostsFile in that case. + +# OTHER OPTIONS + +Passing in multiple host key algorithms, key exchange algorithms, ciphers, and MACs is supported; these are specified as string slices. +Example: + + fs = fs.WithOptions( + sftp.Options{ + KeyExchanges: []string{ "diffie-hellman-group-a256", "ecdh-sha2-nistp256" }, + Ciphers: []string{ "aes256-ctr", "aes192-ctr", "aes128-ctr" }, + MACs: []string{ "hmac-sha2-256", "hmac-sha2-512" }, + HostKeyAlgorithms: []string{ "ssh-rsa", "ssh-ed25519" }, + // other settings + }, + ) + +# FilePermissions + +The `FilePermissions` option allows you to specify the file permissions for files created or modified using the SFTP backend. +These permissions will override the sftp server or underlying filesystem's umask (default permissions). Permissions should +be specified using an octal literal (e.g., `0777` for full read, write, and execute permissions for all users). + +Example: + + fs = fs.WithOptions( + sftp.Options{ + FilePermissions: "0777", // specify permissions as an octal value (in string form) + // other settings + }, + ) + +When a file is opened for Write() or Touch()'d, the specified `FilePermissions` will be applied to the file. + +# AutoDisconnect + +When dialing a TCP connection, Go doesn't disconnect for you. This is true even when the connection falls out of scope, and even when +garbage collection is forced. The connection must be explicitly closed. Unfortunately, VFS.FileSystem has no explicit close mechanism. + +Instead, the SFTP backend will automatically disconnect 10 seconds (default) after connection. This disconnect timer is +canceled anytime a server-side request (like list, read, etc) is made. 
Once the request has completed, a new timer will begin. +If the timer expires (because it is not interrupted by any request), the server connection will be closed. Any subsequent server +request will first reconnect, perform the request, and start a new disconnect timer. + +Options.AutoDisconnect accepts an integer representing the number of seconds before disconnecting after being idle. +Default value is 10 seconds. + +Any server request action using the same underlying FileSystem (and therefore sftp client), will reset the timer. This +should be the most desirable behavior. + + func doSFTPStuff() { + fs := sftp.NewFileSystem() + loc, err := fs.NewLocation("myuser@server.com:22", "/some/path/") + file1, _ := loc.NewFile("file1.txt") + file2, _ := loc.NewFile("file2.txt") + file1.Touch() // "touches" file and starts disconnect timer (default: 10sec) + _, _ = loc.List() // stops timer, does location listing, resets timer to 10 seconds + file2.Touch() // stops timer, "touches" file2, resets timer to 10 seconds + time.Sleep(time.Duration(15) * time.Second) // pause for 15 seconds, disconnects from the server after 10 seconds + _, _ = loc.List() // reconnects, does location listing, starts new disconnect timer + return + } + + func main() { + // call our sftp function + doSFTPStuff() + // even though the vfs sftp objects have fallen out of scope, our connection remains UNTIL the timer counts down + + // do more work (that takes longer than 10 seconds) + doOtherTimeConsumingStuff() + + // at some point during the above, the sftp connection will have closed + } + +NOTE: AutoDisconnect has nothing to do with "keep alive". Here we're only concerned with releasing resources, not keeping +the server from disconnecting us. If that is something you want, you'd have to implement it yourself, injecting your own +client using WithClient(). +*/ +package sftp diff --git a/vendor/github.com/c2fo/vfs/v6/backend/sftp/file.go b/vendor/github.com/c2fo/vfs/v6/backend/sftp/file.go new file mode 100644 index 000000000..cc800328b --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/sftp/file.go @@ -0,0 +1,570 @@ +package sftp + +import ( + "errors" + "fmt" + "io" + "os" + "path" + "time" + + "github.com/c2fo/vfs/v6" + "github.com/c2fo/vfs/v6/options" + "github.com/c2fo/vfs/v6/utils" +) + +// File implements vfs.File interface for SFTP fs. +type File struct { + fileSystem *FileSystem + Authority utils.Authority + path string + sftpfile ReadWriteSeekCloser + opener fileOpener + seekCalled bool + readCalled bool + flagsUsed int +} + +// this type allows for injecting a mock fileOpener function +type fileOpener func(c Client, p string, f int) (ReadWriteSeekCloser, error) + +// Info Functions + +// LastModified returns the LastModified property of sftp file. +func (f *File) LastModified() (*time.Time, error) { + client, err := f.fileSystem.Client(f.Authority) + if err != nil { + return nil, err + } + // start timer once action is completed + defer f.fileSystem.connTimerStart() + + userinfo, err := client.Stat(f.Path()) + if err != nil { + return nil, err + } + t := userinfo.ModTime() + return &t, nil +} + +// Name returns the name portion of the file's path property. IE: "file.txt" of "sftp://someuser@host.com/some/path/to/file.txt" +func (f *File) Name() string { + return path.Base(f.path) +} + +// Path returns the directory portion of the file's path. 
IE: "path/to" of "sftp://someuser@host.com/some/path/to/file.txt" +func (f *File) Path() string { + return utils.EnsureLeadingSlash(f.path) +} + +// Exists returns a boolean indicating whether the file exists on the sftp server +func (f *File) Exists() (bool, error) { + client, err := f.fileSystem.Client(f.Authority) + if err != nil { + return false, err + } + // start timer once action is completed + defer f.fileSystem.connTimerStart() + + _, err = client.Stat(f.Path()) + if err != nil && errors.Is(err, os.ErrNotExist) { + return false, nil + } else if err != nil { + return false, err + } + + return true, nil +} + +// Touch creates a zero-length file on the vfs.File if no File exists. Update File's last modified timestamp. +// Returns error if unable to touch File. +func (f *File) Touch() error { + // restart timer once action is completed + f.fileSystem.connTimerStop() + defer f.fileSystem.connTimerStart() + + exists, err := f.Exists() + if err != nil { + return err + } + + if !exists { + file, err := f.openFile(os.O_WRONLY | os.O_CREATE) + if err != nil { + return err + } + f.sftpfile = file + return f.Close() + } + + client, err := f.fileSystem.Client(f.Authority) + if err != nil { + return err + } + + // set permissions if default permissions are set + err = f.setPermissions(client, f.fileSystem.options) + if err != nil { + return err + } + + // update last accessed and last modified times + now := time.Now() + return client.Chtimes(f.Path(), now, now) +} + +// Size returns the size of the remote file. +func (f *File) Size() (uint64, error) { + client, err := f.fileSystem.Client(f.Authority) + if err != nil { + return 0, err + } + // start timer once action is completed + defer f.fileSystem.connTimerStart() + + userinfo, err := client.Stat(f.Path()) + if err != nil { + return 0, err + } + return uint64(userinfo.Size()), nil +} + +// Location returns a vfs.Location at the location of the file. IE: if file is at +// sftp://someuser@host.com/here/is/the/file.txt the location points to sftp://someuser@host.com/here/is/the/ +func (f *File) Location() vfs.Location { + return &Location{ + fileSystem: f.fileSystem, + path: path.Dir(f.path), + Authority: f.Authority, + } +} + +// Move/Copy Operations + +// MoveToFile puts the contents of File into the targetFile passed using File.CopyToFile. +// If the copy succeeds, the source file is deleted. Any errors from the copy or delete are +// returned. +// If the given location is also sftp AND for the same user and host, the sftp Rename method is used, otherwise +// we'll do an io.Copy to the destination file, then delete the source file. 
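+// +// A minimal sketch (hypothetical host and paths; error handling elided). With matching user and +// host, the move is a server-side rename; otherwise it degrades to copy-then-delete: +// +//	fs := NewFileSystem() +//	src, _ := fs.NewFile("myuser@server.com:22", "/tmp/a.txt") +//	dst, _ := fs.NewFile("myuser@server.com:22", "/tmp/b.txt") +//	err := src.MoveToFile(dst) 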
+func (f *File) MoveToFile(t vfs.File) error { + // sftp rename if vfs is sftp and for the same user/host + if f.fileSystem.Scheme() == t.Location().FileSystem().Scheme() && + f.Authority.UserInfo().Username() == t.(*File).Authority.UserInfo().Username() && + f.Authority.HostPortStr() == t.(*File).Authority.HostPortStr() { + // ensure destination exists before moving + exists, err := t.Location().Exists() + if err != nil { + return err + } + if !exists { + // it doesn't matter which client we use since they are effectively the same + client, err := f.fileSystem.Client(f.Authority) + if err != nil { + return err + } + // start timer once action is completed + defer f.fileSystem.connTimerStart() + + err = client.MkdirAll(t.Location().Path()) + if err != nil { + return err + } + } + + // check if file already exists in the destination and delete if exists + exists, err = t.Exists() + if err != nil { + return err + } + if exists { + err := t.Delete() + if err != nil { + return err + } + } + + return f.sftpRename(t.(*File)) + } + + // otherwise do copy-delete + if err := f.CopyToFile(t); err != nil { + return err + } + return f.Delete() +} + +// MoveToLocation works by creating a new file on the target location then calling MoveToFile() on it. +func (f *File) MoveToLocation(location vfs.Location) (vfs.File, error) { + + newFile, err := location.NewFile(f.Name()) + if err != nil { + return nil, err + } + + return newFile, f.MoveToFile(newFile) +} + +// CopyToFile puts the contents of File into the targetFile passed. +func (f *File) CopyToFile(file vfs.File) (err error) { + + // Close file (f) reader regardless of an error + defer func() { + // close writer + wErr := file.Close() + // close reader + rErr := f.Close() + // + if err == nil { + if wErr != nil { + err = wErr + } else if rErr != nil { + err = rErr + } + } + }() + + fileBufferSize := 0 + + if opts, ok := f.Location().FileSystem().(*FileSystem).options.(Options); ok { + fileBufferSize = opts.FileBufferSize + } + + if err := utils.TouchCopyBuffered(file, f, fileBufferSize); err != nil { + return err + } + + // Close target to flush and ensure that cursor isn't at the end of the file when the caller reopens for read + if cerr := file.Close(); cerr != nil { + return cerr + } + + return err +} + +// CopyToLocation creates a copy of *File, using the file's current path as the new file's +// path at the given location. +func (f *File) CopyToLocation(location vfs.Location) (vfs.File, error) { + + newFile, err := location.NewFile(f.Name()) + if err != nil { + return nil, err + } + + return newFile, f.CopyToFile(newFile) +} + +// CRUD Operations + +// Delete removes the remote file. Error is returned, if any. +func (f *File) Delete(opts ...options.DeleteOption) error { + client, err := f.fileSystem.Client(f.Authority) + if err != nil { + return err + } + // start timer once action is completed + defer f.fileSystem.connTimerStart() + + return client.Remove(f.Path()) +} + +// Close calls the underlying sftp.File Close, if opened, and clears the internal pointer +func (f *File) Close() error { + // restart timer once action is completed + f.fileSystem.connTimerStop() + defer f.fileSystem.connTimerStart() + + f.seekCalled = false + f.readCalled = false + + if f.sftpfile != nil { + err := f.sftpfile.Close() + if err != nil { + return utils.WrapCloseError(err) + } + f.sftpfile = nil + } + + // no op for unopened file + return nil +} + +// Read calls the underlying sftp.File Read. 
+func (f *File) Read(p []byte) (n int, err error) { + // restart timer once action is completed + f.fileSystem.connTimerStop() + defer f.fileSystem.connTimerStart() + + sftpfile, err := f.openFile(os.O_RDONLY) + if err != nil { + return 0, utils.WrapReadError(err) + } + + f.readCalled = true + + read, err := sftpfile.Read(p) + if err != nil { + // if we got io.EOF, we'll return the read and the EOF error + // because io.Copy looks for EOF to determine if it's done + // and doesn't support error wrapping + if errors.Is(err, io.EOF) { + return read, io.EOF + } + return read, utils.WrapReadError(err) + } + + return read, nil +} + +// Seek calls the underlying sftp.File Seek. +func (f *File) Seek(offset int64, whence int) (int64, error) { + // restart timer once action is completed + f.fileSystem.connTimerStop() + defer f.fileSystem.connTimerStart() + + sftpfile, err := f.openFile(os.O_RDONLY) + if err != nil { + return 0, utils.WrapSeekError(err) + } + + f.seekCalled = true + pos, err := sftpfile.Seek(offset, whence) + if err != nil { + return pos, utils.WrapSeekError(err) + } + + return pos, nil +} + +// Write calls the underlying sftp.File Write. +func (f *File) Write(data []byte) (res int, err error) { + // restart timer once action is completed + f.fileSystem.connTimerStop() + defer f.fileSystem.connTimerStart() + + // unless seek or read is called first, writes should replace a file (not edit) + // writes should edit a file if seek or read is called first + flags := os.O_WRONLY | os.O_CREATE + if !f.readCalled && !f.seekCalled { + flags |= os.O_TRUNC + } + + sftpfile, err := f.openFile(flags) + if err != nil { + return 0, utils.WrapWriteError(err) + } + + b, err := sftpfile.Write(data) + if err != nil { + return b, utils.WrapWriteError(err) + } + return b, nil +} + +// URI returns the File's URI as a string. +func (f *File) URI() string { + loc := f.Location().(*Location) + return utils.EncodeURI( + f.fileSystem.Scheme(), + loc.Authority.UserInfo().Username(), + loc.Authority.HostPortStr(), + f.Path(), + ) +} + +// String implements fmt.Stringer, returning the file's URI as the default string. +func (f *File) String() string { + return f.URI() +} + +/* + Private helper functions +*/ + +// openFile wrapper allows us to inject a file opener (for mocking) vs the defaultOpenFile. 
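+// +// A minimal test sketch: a stub satisfying ReadWriteSeekCloser can be injected so that no ssh dial +// occurs (the stub value here is hypothetical): +// +//	f := &File{opener: func(c Client, p string, flags int) (ReadWriteSeekCloser, error) { +//		return stub, nil +//	}} 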
+func (f *File) openFile(flags int) (ReadWriteSeekCloser, error) { //nolint:gocyclo // this function is complex by nature + if f.sftpfile != nil { + + // this case shouldn't normally exist except when we've set our own ReadWriteSeekCloser in tests + if f.flagsUsed == 0 && !f.readCalled && !f.seekCalled { + return f.sftpfile, nil + } + + needRw := false + // check if the file is not already open in read-write mode + if (f.flagsUsed & os.O_RDWR) == 0 { + + // check if currently open for read only but now need write + if f.flagsUsed == os.O_RDONLY && (flags&(os.O_WRONLY|os.O_RDWR)) != 0 { + needRw = true + } + + // check if currently open for write only but now need read + if (f.flagsUsed&(os.O_WRONLY|os.O_RDWR)) != 0 && flags == os.O_RDONLY { + needRw = true + } + + // set up new flags + newFlags := os.O_RDWR + if flags&os.O_CREATE != 0 { + newFlags |= os.O_CREATE + } + if flags&os.O_TRUNC != 0 { + newFlags |= os.O_TRUNC + } + + flags = newFlags + } + + // if we're trying to open a file for writing and it's already open for read, reopen it for read/write and + // seek to current position + if needRw { + var pos int64 + + // capture current position if file is open for read (only in edit mode) + if f.readCalled || f.seekCalled { + var err error + // get current position + pos, err = f.sftpfile.Seek(0, io.SeekCurrent) + if err != nil { + return nil, err + } + } + + // close file + if err := f.sftpfile.Close(); err != nil { + return nil, err + } + + // reopen file for read/write + file, err := f._open(flags) + if err != nil { + return nil, err + } + + // seek to current position (only in edit mode) + if f.readCalled || f.seekCalled { + if _, err := file.Seek(pos, io.SeekStart); err != nil { + return nil, err + } + } + + f.flagsUsed = flags + f.sftpfile = file + + } + return f.sftpfile, nil + } + + file, err := f._open(flags) + if err != nil { + return nil, err + } + + f.flagsUsed = flags + f.sftpfile = file + + return file, nil +} + +func (f *File) _open(flags int) (ReadWriteSeekCloser, error) { + client, err := f.fileSystem.Client(f.Authority) + if err != nil { + return nil, err + } + // normally we'd do a defer of fs connTimerStart() here but not necessary since we handle it in the openFile caller + + if flags&os.O_CREATE != 0 { + // vfs specifies that all implementations make dir path if it doesn't exist + err = client.MkdirAll(path.Dir(f.path)) + if err != nil { + return nil, err + } + } + + var opener fileOpener + if f.opener != nil { + opener = f.opener + } else { + opener = defaultOpenFile + } + + rwsc, err := opener(client, f.Path(), flags) + if err != nil { + return nil, err + } + + // chmod file if default permissions are set and opening for write + if flags&os.O_WRONLY != 0 { + err = f.setPermissions(client, f.fileSystem.options) + if err != nil { + return nil, err + } + } + + return rwsc, nil +} + +// setPermissions sets the file permissions if they are set in the options +func (f *File) setPermissions(client Client, opts vfs.Options) error { + if opts == nil { + return nil + } + + // ensure we're dealing with pointer to Options + ptrOpts, ok := opts.(*Options) + if !ok { + p := opts.(Options) + ptrOpts = &p + } + + // if file permissions are not set, return early + if ptrOpts.FilePermissions == nil { + return nil + } + + // get file mode + perms, err := ptrOpts.GetFileMode() + if err != nil { + return fmt.Errorf("get file mode err: %w", err) + } + + if err := client.Chmod(f.Path(), *perms); err != nil { + return fmt.Errorf("chmod err: %w", err) + } + + return nil +} + +// 
defaultOpenFile uses sftp.Client to open a file and returns an sftp.File +func defaultOpenFile(c Client, p string, f int) (ReadWriteSeekCloser, error) { + return c.OpenFile(p, f) +} + +func (f *File) sftpRename(target *File) error { + client, err := f.fileSystem.Client(f.Authority) + if err != nil { + return err + } + // start timer once action is completed + defer f.fileSystem.connTimerStart() + + if err := client.Rename(f.Path(), target.Path()); err != nil { + return err + } + return nil +} + +// ReadWriteSeekCloser is a read write seek closer interface representing capabilities needed from std libs sftp File struct. +type ReadWriteSeekCloser interface { + io.ReadWriteSeeker + io.Closer + // sftp.File also provides the following which we don't use (but could): + // + // io.WriterTo + // io.ReaderFrom + // func (f *File) Chmod(mode os.FileMode) error + // func (f *File) Chown(uid, gid int) error + // func (f *File) Name() string + // func (f *File) Stat() (os.FileInfo, error) + // func (f *File) Truncate(size int64) error +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/sftp/fileSystem.go b/vendor/github.com/c2fo/vfs/v6/backend/sftp/fileSystem.go new file mode 100644 index 000000000..3c2fdbee9 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/sftp/fileSystem.go @@ -0,0 +1,200 @@ +package sftp + +import ( + "errors" + "fmt" + "io" + "os" + "path" + "sync" + "time" + + _sftp "github.com/pkg/sftp" + "golang.org/x/crypto/ssh" + + "github.com/c2fo/vfs/v6" + "github.com/c2fo/vfs/v6/backend" + "github.com/c2fo/vfs/v6/utils" +) + +// Scheme defines the filesystem type. +const Scheme = "sftp" +const name = "Secure File Transfer Protocol" +const defaultAutoDisconnectDuration = 10 + +var defaultClientGetter func(utils.Authority, Options) (Client, io.Closer, error) + +// FileSystem implements vfs.FileSystem for the SFTP filesystem. +type FileSystem struct { + options vfs.Options + sftpclient Client + sshConn io.Closer + timerMutex sync.Mutex + connTimer *time.Timer +} + +// Retry will return the default no-op retrier. The SFTP client provides its own retryer interface, and is available +// to override via the sftp.FileSystem Options type. +func (fs *FileSystem) Retry() vfs.Retry { + return vfs.DefaultRetryer() +} + +// NewFile function returns the SFTP implementation of vfs.File. +func (fs *FileSystem) NewFile(authority, filePath string) (vfs.File, error) { + if fs == nil { + return nil, errors.New("non-nil sftp.FileSystem pointer is required") + } + if filePath == "" { + return nil, errors.New("non-empty string for path is required") + } + if err := utils.ValidateAbsoluteFilePath(filePath); err != nil { + return nil, err + } + + auth, err := utils.NewAuthority(authority) + if err != nil { + return nil, err + } + + return &File{ + fileSystem: fs, + Authority: auth, + path: path.Clean(filePath), + }, nil +} + +// NewLocation function returns the SFTP implementation of vfs.Location. 
+func (fs *FileSystem) NewLocation(authority, locPath string) (vfs.Location, error) {
+	if fs == nil {
+		return nil, errors.New("non-nil sftp.FileSystem pointer is required")
+	}
+	if err := utils.ValidateAbsoluteLocationPath(locPath); err != nil {
+		return nil, err
+	}
+
+	auth, err := utils.NewAuthority(authority)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Location{
+		fileSystem: fs,
+		path:       utils.EnsureTrailingSlash(path.Clean(locPath)),
+		Authority:  auth,
+	}, nil
+}
+
+// Name returns "Secure File Transfer Protocol"
+func (fs *FileSystem) Name() string {
+	return name
+}
+
+// Scheme returns "sftp" as the initial part of a file URI, i.e., sftp://
+func (fs *FileSystem) Scheme() string {
+	return Scheme
+}
+
+// Client returns the underlying sftp client, creating it if necessary.
+// See Overview for authentication resolution.
+func (fs *FileSystem) Client(authority utils.Authority) (Client, error) {
+	// first stop connection timer, if any
+	fs.connTimerStop()
+	if fs.sftpclient == nil {
+		if fs.options == nil {
+			fs.options = Options{}
+		}
+
+		opts, ok := fs.options.(Options)
+		if !ok {
+			return nil, fmt.Errorf("unable to create client, vfs.Options must be an sftp.Options")
+		}
+		var err error
+		fs.sftpclient, fs.sshConn, err = defaultClientGetter(authority, opts)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return fs.sftpclient, nil
+}
+
+func (fs *FileSystem) connTimerStart() {
+	fs.timerMutex.Lock()
+	defer fs.timerMutex.Unlock()
+
+	aliveSec := defaultAutoDisconnectDuration
+	if fs.options != nil {
+		if v, ok := fs.options.(Options); ok && v.AutoDisconnect != 0 {
+			aliveSec = v.AutoDisconnect
+		}
+	}
+
+	fs.connTimer = time.AfterFunc(time.Duration(aliveSec)*time.Second, func() {
+		// close connection and nil-ify client to force lazy reconnect
+		if fs.sftpclient != nil {
+			_ = fs.sftpclient.Close()
+			fs.sftpclient = nil
+		}
+
+		if fs.sshConn != nil {
+			_ = fs.sshConn.Close()
+			fs.sshConn = nil
+		}
+	})
+}
+
+func (fs *FileSystem) connTimerStop() {
+	fs.timerMutex.Lock()
+	defer fs.timerMutex.Unlock()
+	if fs.connTimer != nil {
+		fs.connTimer.Stop()
+		fs.connTimer = nil
+	}
+}
+
+// WithOptions sets options for the client and returns the filesystem (chainable)
+func (fs *FileSystem) WithOptions(opts vfs.Options) *FileSystem {
+
+	// only set options if vfs.Options is sftp.Options
+	if opts, ok := opts.(Options); ok {
+		fs.options = opts
+		// we set the client to nil to ensure that a new client is created using the new options when Client() is called
+		fs.sftpclient = nil
+	}
+	return fs
+}
+
+// WithClient passes in an sftp client and returns the filesystem (chainable)
+func (fs *FileSystem) WithClient(client interface{}) *FileSystem {
+	switch client.(type) {
+	case Client, *ssh.Client:
+		fs.sftpclient = client.(Client)
+		fs.options = nil
+	}
+	return fs
+}
+
+// NewFileSystem is an initializer for the FileSystem struct.
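+//
+// A construction sketch (the option values are illustrative):
+//
+//	fs := NewFileSystem().WithOptions(Options{AutoDisconnect: 30})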
+func NewFileSystem() *FileSystem { + return &FileSystem{} +} + +func init() { + defaultClientGetter = getClient + + // registers a default Filesystem + backend.Register(Scheme, NewFileSystem()) +} + +// Client is an interface to make it easier to test +type Client interface { + Chmod(path string, mode os.FileMode) error + Chtimes(path string, atime, mtime time.Time) error + Create(path string) (*_sftp.File, error) + MkdirAll(path string) error + OpenFile(path string, f int) (*_sftp.File, error) + ReadDir(p string) ([]os.FileInfo, error) + Remove(path string) error + Rename(oldname, newname string) error + Stat(p string) (os.FileInfo, error) + Close() error +} diff --git a/vendor/github.com/c2fo/vfs/v6/backend/sftp/location.go b/vendor/github.com/c2fo/vfs/v6/backend/sftp/location.go new file mode 100644 index 000000000..f3c206030 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/backend/sftp/location.go @@ -0,0 +1,222 @@ +package sftp + +import ( + "errors" + "os" + "path" + "regexp" + "strings" + "unicode/utf8" + + "github.com/c2fo/vfs/v6" + "github.com/c2fo/vfs/v6/options" + "github.com/c2fo/vfs/v6/utils" +) + +// Location implements the vfs.Location interface specific to sftp fs. +type Location struct { + fileSystem *FileSystem + path string + Authority utils.Authority +} + +// List calls SFTP ReadDir to list all files in the location's path. +// If you have many thousands of files at the given location, this could become quite expensive. +func (l *Location) List() ([]string, error) { + + var filenames []string + client, err := l.fileSystem.Client(l.Authority) + if err != nil { + return filenames, err + } + // start timer once action is completed + defer l.fileSystem.connTimerStart() + + fileinfos, err := client.ReadDir(l.Path()) + if err != nil { + if err == os.ErrNotExist { + return filenames, nil + } + return filenames, err + } + for _, fileinfo := range fileinfos { + if !fileinfo.IsDir() { + filenames = append(filenames, fileinfo.Name()) + } + } + + return filenames, nil +} + +// ListByPrefix calls SFTP ReadDir with the location's path modified relatively by the prefix arg passed to the function. +func (l *Location) ListByPrefix(prefix string) ([]string, error) { + + var filenames []string + client, err := l.fileSystem.Client(l.Authority) + if err != nil { + return filenames, err + } + // start timer once action is completed + defer l.fileSystem.connTimerStart() + + fullpath := path.Join(l.Path(), prefix) + // check if last char is not /, aka is not a dir, get base of path + baseprefix := "" + r, _ := utf8.DecodeLastRuneInString(fullpath) + if r != '/' { + baseprefix = path.Base(fullpath) + } + fullpath = utils.EnsureTrailingSlash(path.Dir(fullpath)) + fileinfos, err := client.ReadDir(fullpath) + if err != nil { + return filenames, err + } + + for _, fileinfo := range fileinfos { + if !fileinfo.IsDir() { + name := fileinfo.Name() + if baseprefix != "" { + if strings.HasPrefix(name, baseprefix) { + filenames = append(filenames, name) + } + } else { + filenames = append(filenames, name) + } + } + } + + return filenames, nil +} + +// ListByRegex retrieves the filenames of all the files at the location's current path, then filters out all those +// that don't match the given regex. The resource considerations of List() apply here as well. 
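+//
+// For example (the pattern is illustrative):
+//
+//	csvs, err := loc.ListByRegex(regexp.MustCompile(`\.csv$`))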
+func (l *Location) ListByRegex(regex *regexp.Regexp) ([]string, error) {
+
+	filenames, err := l.List()
+	if err != nil {
+		return []string{}, err
+	}
+
+	var filteredFilenames []string
+	for _, filename := range filenames {
+		if regex.MatchString(filename) {
+			filteredFilenames = append(filteredFilenames, filename)
+		}
+	}
+	return filteredFilenames, nil
+}
+
+// Volume returns the Authority the location is contained in.
+func (l *Location) Volume() string {
+	return l.Authority.String()
+}
+
+// Path returns the path the location references in most SFTP calls.
+func (l *Location) Path() string {
+	return utils.EnsureLeadingSlash(utils.EnsureTrailingSlash(l.path))
+}
+
+// Exists returns true if the remote SFTP directory exists.
+func (l *Location) Exists() (bool, error) {
+
+	client, err := l.fileSystem.Client(l.Authority)
+	if err != nil {
+		return false, err
+	}
+	// start timer once action is completed
+	defer l.fileSystem.connTimerStart()
+
+	info, err := client.Stat(l.Path())
+	if err != nil && err == os.ErrNotExist {
+		return false, nil
+	} else if err != nil {
+		return false, err
+	}
+
+	if !info.IsDir() {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// NewLocation makes a copy of the underlying Location, then modifies its path by calling ChangeDir with the
+// relativePath argument, returning the resulting location. The only possible errors come from the call to
+// ChangeDir.
+func (l *Location) NewLocation(relativePath string) (vfs.Location, error) {
+	if l == nil {
+		return nil, errors.New("non-nil sftp.Location pointer receiver is required")
+	}
+
+	// make a copy of the original location first, then ChangeDir, leaving the original location as-is
+	newLocation := &Location{}
+	*newLocation = *l
+	err := newLocation.ChangeDir(relativePath)
+	if err != nil {
+		return nil, err
+	}
+	return newLocation, nil
+}
+
+// ChangeDir takes a relative path and modifies the underlying Location's path. The receiver is modified by this
+// call, so the only return value is an error (returned when relativePath is empty or fails validation).
+func (l *Location) ChangeDir(relativePath string) error {
+	if l == nil {
+		return errors.New("non-nil sftp.Location pointer receiver is required")
+	}
+	if relativePath == "" {
+		return errors.New("non-empty string relativePath is required")
+	}
+	err := utils.ValidateRelativeLocationPath(relativePath)
+	if err != nil {
+		return err
+	}
+	l.path = utils.EnsureLeadingSlash(utils.EnsureTrailingSlash(path.Join(l.path, relativePath)))
+	return nil
+}
+
+// NewFile uses the properties of the calling location to generate a vfs.File (backed by an sftp.File). The filePath
+// argument is expected to be a relative path to the location's current path.
+func (l *Location) NewFile(filePath string) (vfs.File, error) {
+	if l == nil {
+		return nil, errors.New("non-nil sftp.Location pointer receiver is required")
+	}
+	if filePath == "" {
+		return nil, errors.New("non-empty string filePath is required")
+	}
+	err := utils.ValidateRelativeFilePath(filePath)
+	if err != nil {
+		return nil, err
+	}
+	newFile := &File{
+		fileSystem: l.fileSystem,
+		Authority:  l.Authority,
+		path:       utils.EnsureLeadingSlash(path.Join(l.path, filePath)),
+	}
+	return newFile, nil
+}
+
+// DeleteFile removes the file at fileName path.
+func (l *Location) DeleteFile(fileName string, opts ...options.DeleteOption) error {
+	file, err := l.NewFile(fileName)
+	if err != nil {
+		return err
+	}
+
+	return file.Delete(opts...)
+}
+
+// FileSystem returns a vfs.FileSystem interface of the location's underlying fileSystem.
+func (l *Location) FileSystem() vfs.FileSystem {
+	return l.fileSystem
+}
+
+// URI returns the Location's URI as a string.
+func (l *Location) URI() string {
+	return utils.EncodeURI(l.FileSystem().Scheme(), l.Authority.UserInfo().Username(), l.Authority.HostPortStr(), l.Path())
+}
+
+// String implements fmt.Stringer, returning the location's URI as the default string.
+func (l *Location) String() string {
+	return l.URI()
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/backend/sftp/options.go b/vendor/github.com/c2fo/vfs/v6/backend/sftp/options.go
new file mode 100644
index 000000000..071495e9b
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/backend/sftp/options.go
@@ -0,0 +1,327 @@
+package sftp
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"runtime"
+	"strconv"
+
+	"github.com/mitchellh/go-homedir"
+	_sftp "github.com/pkg/sftp"
+	"golang.org/x/crypto/ssh"
+	"golang.org/x/crypto/ssh/knownhosts"
+
+	"github.com/c2fo/vfs/v6/utils"
+)
+
+const systemWideKnownHosts = "/etc/ssh/ssh_known_hosts"
+
+// Options holds sftp-specific options. Currently only client options are used.
+type Options struct {
+	Password           string   `json:"password,omitempty"`       // env var VFS_SFTP_PASSWORD
+	KeyFilePath        string   `json:"keyFilePath,omitempty"`    // env var VFS_SFTP_KEYFILE
+	KeyPassphrase      string   `json:"keyPassphrase,omitempty"`  // env var VFS_SFTP_KEYFILE_PASSPHRASE
+	KnownHostsFile     string   `json:"knownHostsFile,omitempty"` // env var VFS_SFTP_KNOWN_HOSTS_FILE
+	KnownHostsString   string   `json:"knownHostsString,omitempty"`
+	KeyExchanges       []string `json:"keyExchanges,omitempty"`
+	Ciphers            []string `json:"cihers,omitempty"`
+	MACs               []string `json:"macs,omitempty"`
+	HostKeyAlgorithms  []string `json:"hostKeyAlgorithms,omitempty"`
+	AutoDisconnect     int      `json:"autoDisconnect,omitempty"` // seconds before disconnecting. default: 10
+	KnownHostsCallback ssh.HostKeyCallback // env var VFS_SFTP_INSECURE_KNOWN_HOSTS
+	FileBufferSize     int      `json:"fileBufferSize,omitempty"`  // buffer size in bytes used with utils.TouchCopyBuffered
+	FilePermissions    *string  `json:"filePermissions,omitempty"` // default file permissions for new files
+}
+
+// GetFileMode converts the FilePermissions string to os.FileMode.
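+//
+// The string is parsed with strconv's base auto-detection, so a leading zero
+// means octal; e.g., a FilePermissions value of "0777" yields os.FileMode(0777).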
+func (o *Options) GetFileMode() (*os.FileMode, error) {
+	if o.FilePermissions == nil {
+		return nil, nil
+	}
+
+	// Convert the string to an unsigned integer; base 0 means a leading zero is interpreted as octal
+	parsed, err := strconv.ParseUint(*o.FilePermissions, 0, 32)
+	if err != nil {
+		return nil, fmt.Errorf("invalid file mode: %v", err)
+	}
+	mode := os.FileMode(parsed)
+	return &mode, nil
+}
+
+var defaultSSHConfig = &ssh.ClientConfig{
+	HostKeyAlgorithms: []string{
+		"rsa-sha2-256-cert-v01@openssh.com",
+		"rsa-sha2-512-cert-v01@openssh.com",
+		"ssh-rsa-cert-v01@openssh.com",
+		"ecdsa-sha2-nistp256-cert-v01@openssh.com",
+		"ecdsa-sha2-nistp384-cert-v01@openssh.com",
+		"ecdsa-sha2-nistp521-cert-v01@openssh.com",
+		"ssh-ed25519-cert-v01@openssh.com",
+		"ssh-ed25519",
+		"ecdsa-sha2-nistp256",
+		"ecdsa-sha2-nistp384",
+		"ecdsa-sha2-nistp521",
+		"ssh-rsa",
+		"rsa-sha2-256",
+		"rsa-sha2-512",
+		"sk-ssh-ed25519@openssh.com",
+		"sk-ecdsa-sha2-nistp256@openssh.com",
+	},
+	Config: ssh.Config{
+		KeyExchanges: []string{
+			"curve25519-sha256",
+			"curve25519-sha256@libssh.org",
+			"ecdh-sha2-nistp256",
+			"ecdh-sha2-nistp384",
+			"ecdh-sha2-nistp521",
+			"diffie-hellman-group-exchange-sha256",
+			"diffie-hellman-group16-sha512",
+			"diffie-hellman-group18-sha512",
+			"diffie-hellman-group14-sha256",
+			"diffie-hellman-group14-sha1",
+		},
+		Ciphers: []string{
+			"aes128-gcm@openssh.com",
+			"aes256-gcm@openssh.com",
+			"chacha20-poly1305@openssh.com",
+			"aes256-ctr",
+			"aes192-ctr",
+			"aes128-ctr",
+			"aes128-cbc",
+			"3des-cbc",
+		},
+		MACs: []string{
+			"hmac-sha2-256-etm@openssh.com",
+			"hmac-sha2-512-etm@openssh.com",
+			"hmac-sha2-256",
+			"hmac-sha2-512",
+			"hmac-sha1",
+			"hmac-sha1-96",
+		},
+	},
+}
+
+func getClient(authority utils.Authority, opts Options) (Client, io.Closer, error) {
+
+	// setup Authentication
+	authMethods, err := getAuthMethods(opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// get callback for handling known_hosts man-in-the-middle checks
+	hostKeyCallback, err := getHostKeyCallback(opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Define the Client Config
+	config := getSShConfig(opts)
+	config.User = authority.UserInfo().Username()
+	config.Auth = authMethods
+	config.HostKeyCallback = hostKeyCallback
+
+	// default to port 22 when no port is set
+	port := authority.Port()
+	if port == 0 {
+		port = 22
+	}
+	host := fmt.Sprintf("%s:%d", authority.Host(), port)
+
+	// TODO begin timeout until session is created
+	sshConn, err := ssh.Dial("tcp", host, config)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	sftpClient, err := _sftp.NewClient(sshConn)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return sftpClient, sshConn, nil
+}
+
+// getSShConfig gets ssh config from Options
+func getSShConfig(opts Options) *ssh.ClientConfig {
+	// copy default config
+	config := *defaultSSHConfig
+
+	// override default config with any user-defined config
+	if opts.HostKeyAlgorithms != nil {
+		config.HostKeyAlgorithms = opts.HostKeyAlgorithms
+	}
+	if opts.Ciphers != nil {
+		config.Config.Ciphers = opts.Ciphers
+	}
+	if opts.KeyExchanges != nil {
+		config.Config.KeyExchanges = opts.KeyExchanges
+	}
+	if opts.MACs != nil {
+		config.Config.MACs = opts.MACs
+	}
+
+	return &config
+}
+
+// getHostKeyCallback gets the host key callback for all known_hosts files
+func getHostKeyCallback(opts Options) (ssh.HostKeyCallback, error) {
+	var knownHostsFiles []string
+	switch {
+
+	// use explicit callback in Options
+	case opts.KnownHostsCallback != nil:
+		return opts.KnownHostsCallback, nil
+
+	// use explicit known_hosts public key string in Options
+	case opts.KnownHostsString != "":
+		hostKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(opts.KnownHostsString))
+		if err != nil {
+			return nil, err
+		}
+		return ssh.FixedHostKey(hostKey), nil
+
+	// use Options-specified known_hosts file path, i.e., /home/bob/.ssh/known_hosts
+	case opts.KnownHostsFile != "":
+		// check first to prevent auto-vivification of file
+		found, err := foundFile(opts.KnownHostsFile)
+		if err != nil {
+			return nil, err
+		}
+		if found {
+			knownHostsFiles = append(knownHostsFiles, opts.KnownHostsFile)
+			break
+		}
+		// fall back to the env var if the explicit file wasn't found
+		fallthrough
+
+	// use env var known_hosts file path, i.e., /home/bob/.ssh/known_hosts
+	case os.Getenv("VFS_SFTP_KNOWN_HOSTS_FILE") != "":
+		// check first to prevent auto-vivification of file
+		found, err := foundFile(os.Getenv("VFS_SFTP_KNOWN_HOSTS_FILE"))
+		if err != nil {
+			return nil, err
+		}
+		if found {
+			knownHostsFiles = append(knownHostsFiles, os.Getenv("VFS_SFTP_KNOWN_HOSTS_FILE"))
+			break
+		}
+		// fall back to the default paths if the env var file wasn't found
+		fallthrough
+
+	// skip host key validation entirely when the insecure env var is set
+	case os.Getenv("VFS_SFTP_INSECURE_KNOWN_HOSTS") != "":
+		return ssh.InsecureIgnoreHostKey(), nil //nolint:gosec // this is only used if a user specifically requests it (testing)
+
+	// use user/system-wide known_hosts paths (as defined by OpenSSH https://man.openbsd.org/ssh)
+	default:
+		var err error
+		knownHostsFiles, err = findHomeSystemKnownHosts(knownHostsFiles)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// get host key callback for all known_hosts files
+	return knownhosts.New(knownHostsFiles...)
+}
+
+func findHomeSystemKnownHosts(knownHostsFiles []string) ([]string, error) {
+	// add ~/.ssh/known_hosts
+	home, err := homedir.Dir()
+	if err != nil {
+		return nil, err
+	}
+	homeKnownHostsPath := utils.EnsureLeadingSlash(path.Join(home, ".ssh/known_hosts"))
+
+	// check file existence first to prevent auto-vivification of file
+	found, err := foundFile(homeKnownHostsPath)
+	if err != nil && !errors.Is(err, os.ErrNotExist) {
+		return nil, err
+	}
+	if found {
+		knownHostsFiles = append(knownHostsFiles, homeKnownHostsPath)
+	}
+
+	// add /etc/ssh/ssh_known_hosts for unix-like systems. SSH doesn't exist natively on Windows and each
+	// implementation has a different location for known_hosts. Better to specify it in KnownHostsFile on Windows.
+	if runtime.GOOS != "windows" {
+		// check file existence first to prevent auto-vivification of file
+		found, err := foundFile(systemWideKnownHosts)
+		if err != nil && !errors.Is(err, os.ErrNotExist) {
+			return nil, err
+		}
+		if found {
+			knownHostsFiles = append(knownHostsFiles, systemWideKnownHosts)
+		}
+	}
+	return knownHostsFiles, nil
+}
+
+func foundFile(file string) (bool, error) {
+	if _, err := os.Stat(file); err != nil {
+		if os.IsNotExist(err) {
+			// file does not exist
+			return false, nil
+		}
+		// other error
+		return false, err
+	}
+	return true, nil
+}
+
+func getAuthMethods(opts Options) ([]ssh.AuthMethod, error) {
+	auth := make([]ssh.AuthMethod, 0)
+
+	// use the password from opts, falling back to the env var, if any
+	pw := os.Getenv("VFS_SFTP_PASSWORD")
+	if opts.Password != "" {
+		pw = opts.Password
+	}
+	if pw != "" {
+		auth = append(auth, ssh.Password(pw))
+	}
+
+	// setup key-based auth from opts or env, if any
+	keyfile := os.Getenv("VFS_SFTP_KEYFILE")
+	if opts.KeyFilePath != "" {
+		keyfile = opts.KeyFilePath
+	}
+	if keyfile != "" {
+		// gather passphrase, if any
+		passphrase := os.Getenv("VFS_SFTP_KEYFILE_PASSPHRASE")
+		if opts.KeyPassphrase != "" {
+			passphrase = opts.KeyPassphrase
+		}
+
+		// setup keyfile
+		secretKey, err := getKeyFile(keyfile, passphrase)
+		if err != nil {
+			return []ssh.AuthMethod{}, err
+		}
+		auth = append(auth, ssh.PublicKeys(secretKey))
+	}
+
+	return auth, nil
+}
+
+func getKeyFile(file, passphrase string) (key ssh.Signer, err error) {
+
+	buf, err := os.ReadFile(file) //nolint:gosec
+	if err != nil {
+		return
+	}
+	if passphrase != "" {
+		key, err = ssh.ParsePrivateKeyWithPassphrase(buf, []byte(passphrase))
+		if err != nil {
+			return
+		}
+	} else {
+		key, err = ssh.ParsePrivateKey(buf)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/doc.go b/vendor/github.com/c2fo/vfs/v6/doc.go
new file mode 100644
index 000000000..d8b6cb443
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/doc.go
@@ -0,0 +1,142 @@
+/*
+Package vfs provides a pluggable, extensible, and opinionated set of file system
+functionality for Go across a number of file system types such as os, S3, and GCS.
+
+# Philosophy
+
+When building our platform, initially we wrote a library that was something to the effect of
+
+	if config.DISK == "S3" {
+		// do some s3 file system operation
+	} else if config.DISK == "mock" {
+		// fake something
+	} else {
+		// do some native os.xxx operation
+	}
+
+Not only was this ugly, but because the behaviors of each "file system" were different, we had to constantly alter
+file locations and pass a bucket string around (even if the fs didn't know what a bucket was).
+
+We found a handful of third-party libraries that were interesting but none of them had everything we needed/wanted. Of
+particular inspiration was https://github.com/spf13/afero in its composition of the super-powerful stdlib io.* interfaces.
+Unfortunately, it didn't support Google Cloud Storage and there was still a lot of passing around of strings and structs.
+Few, if any, of the vfs-like libraries provided interfaces to easily and confidently create new file system backends.
+
+What we needed/wanted was the following (and more):
+  - self-contained set of structs that could be passed around like a file/dir handle
+  - the struct would represent an existing or nonexistent file/dir
+  - provide common (and only common) functionality across all file systems so that after initialization, we don't care
+    what the underlying file system is and can therefore write our code agnostically/portably
+  - use io.* interfaces such as io.Reader and io.Writer without needing to call a separate function
+  - extensibility to easily add other needed file systems like Microsoft Azure Cloud File Storage or SFTP
+  - prefer native atomic functions when possible (i.e., S3-to-S3 moving would use the native move api call rather than
+    copy-delete)
+  - a uniform way of addressing files regardless of file system. This is why we use complete URIs in vfssimple
+  - fmt.Stringer interface so that the file struct passed to a log message (or other Stringer use) would show the URI
+  - mockable file system
+  - pluggability so that third-party implementations of our interfaces could be used
+
+# Install
+
+Pre 1.17:
+
+	go get -u github.com/c2fo/vfs/v6
+
+Post 1.17:
+
+	go install github.com/c2fo/vfs/v6@latest
+
+# Upgrading
+
+Upgrading from v5 to v6
+With v6.0.0, the sftp.Options struct changed to accept an array of Key Exchange algorithms rather than a string.
+To update, change the syntax of the auth commands.
+
+	"keyExchanges":"diffie-hellman-group-a256"
+
+becomes
+
+	"keyExchanges":["diffie-hellman-group-a256"]
+
+# Usage
+
+We provide vfssimple as a basic way of initializing file system backends (see each implementation's docs about authentication).
+vfssimple pulls in every c2fo/vfs backend. If you need to reduce the backend requirements (and app memory footprint) or
+add a third-party backend, you'll need to implement your own "factory". See the backend doc for more info.
+
+You can then use those file systems to initialize locations which you'll be referencing frequently, or initialize files directly
+
+	osFile, err := vfssimple.NewFile("file:///path/to/file.txt")
+	s3File, err := vfssimple.NewFile("s3://bucket/prefix/file.txt")
+
+	osLocation, err := vfssimple.NewLocation("file:///tmp/")
+	s3Location, err := vfssimple.NewLocation("s3://bucket/")
+
+	osTmpFile, err := osLocation.NewFile("anotherFile.txt") // file at /tmp/anotherFile.txt
+
+You can perform a number of actions without any consideration for the underlying system's api or implementation details.
+
+	osFileExists, err := osFile.Exists() // true, nil
+	s3FileExists, err := s3File.Exists() // false, nil
+	err = osFile.CopyToFile(s3File)      // nil
+	s3FileExists, err = s3File.Exists()  // true, nil
+
+	movedOsFile, err := osFile.MoveToLocation(osLocation)
+	osFileExists, err = osFile.Exists()            // false, nil (move actions delete the original file)
+	movedOsFileExists, err := movedOsFile.Exists() // true, nil
+
+	s3FileUri := s3File.URI()   // s3://bucket/prefix/file.txt
+	s3FileName := s3File.Name() // file.txt
+	s3FilePath := s3File.Path() // /prefix/file.txt
+
+File's io.* interfaces may be used directly:
+
+	reader := strings.NewReader("Clear is better than clever")
+	gsFile, err := vfssimple.NewFile("gs://somebucket/path/to/file.txt")
+
+	byteCount, err := io.Copy(gsFile, reader)
+	err := gsFile.Close()
+
+# Third-party Backends
+
+* none so far
+
+Feel free to send a pull request if you want to add your backend to the list.
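+
+As a sketch of what registering a backend involves (MyFileSystem is a hypothetical
+vfs.FileSystem implementation; see the backend package for the actual contract):
+
+	func init() {
+		backend.Register("myscheme", &MyFileSystem{})
+	}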
+
+# Ideas
+
+See https://github.com/C2FO/vfs/discussions
+
+  - Provide better List() functionality with more abstracted filtering and paging (iterator?) Return File structs vs URIs?
+  - Add better/any context.Context() support
+  - Standardize errors (sentinel errors?) and utilize error wrapping
+  - Implement [functional options](https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis) vs empty interface vfs.Options
+
+Contributing
+
+ 1. Fork it ()
+ 2. Create your feature branch (`git checkout -b feature/fooBar`)
+ 3. Commit your changes (`git commit -am 'Add some fooBar'`)
+ 4. Push to the branch (`git push origin feature/fooBar`)
+ 5. Create a new Pull Request
+
+# License
+
+Distributed under the MIT license. See `http://github.com/c2fo/vfs/License.md` for more information.
+
+# Definitions
+
+* absolute path - A path is said to be absolute if it provides the entire context needed to find a file, including the
+file system root. An absolute path must begin with a slash and may include . and .. directories.
+
+* file path - A file path ends with a filename and therefore may not end with a slash. It may be relative or absolute.
+
+* location path - A location/dir path must end with a slash. It may be relative or absolute.
+
+* relative path - A relative path is a way to locate a dir or file relative to another directory. A relative path may not
+begin with a slash but may include . and .. directories.
+
+* URI - A Uniform Resource Identifier (URI) is a string of characters that unambiguously identifies a particular resource.
+To guarantee uniformity, all URIs follow a predefined set of syntax rules, but also maintain extensibility through
+a separately defined hierarchical naming scheme (e.g. http://).
+*/
+package vfs
diff --git a/vendor/github.com/c2fo/vfs/v6/errors.go b/vendor/github.com/c2fo/vfs/v6/errors.go
new file mode 100644
index 000000000..b2a1f5a34
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/errors.go
@@ -0,0 +1,21 @@
+package vfs
+
+// Error is a type that allows for the error constants below
+type Error string
+
+// Error returns a string representation of the error
+func (e Error) Error() string { return string(e) }
+
+const (
+	// CopyToNotPossible - CopyTo/MoveTo operations are only possible when the seek position is 0,0
+	CopyToNotPossible = Error("current cursor offset is not 0 as required for this operation")
+
+	// ErrNotExist - File does not exist
+	ErrNotExist = Error("file does not exist")
+
+	// ErrSeekInvalidOffset - Offset is invalid. Must be greater than or equal to 0
+	ErrSeekInvalidOffset = Error("seek: invalid offset")
+
+	// ErrSeekInvalidWhence - Whence is invalid. Must be one of the following: 0 (io.SeekStart), 1 (io.SeekCurrent), or 2 (io.SeekEnd)
+	ErrSeekInvalidWhence = Error("seek: invalid whence")
+)
diff --git a/vendor/github.com/c2fo/vfs/v6/mocks/File.go b/vendor/github.com/c2fo/vfs/v6/mocks/File.go
new file mode 100644
index 000000000..c408c2425
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/mocks/File.go
@@ -0,0 +1,959 @@
+// Code generated by mockery v2.43.2. DO NOT EDIT.
+ +package mocks + +import ( + options "github.com/c2fo/vfs/v6/options" + mock "github.com/stretchr/testify/mock" + + time "time" + + vfs "github.com/c2fo/vfs/v6" +) + +// File is an autogenerated mock type for the File type +type File struct { + mock.Mock +} + +type File_Expecter struct { + mock *mock.Mock +} + +func (_m *File) EXPECT() *File_Expecter { + return &File_Expecter{mock: &_m.Mock} +} + +// Close provides a mock function with given fields: +func (_m *File) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// File_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' +type File_Close_Call struct { + *mock.Call +} + +// Close is a helper method to define mock.On call +func (_e *File_Expecter) Close() *File_Close_Call { + return &File_Close_Call{Call: _e.mock.On("Close")} +} + +func (_c *File_Close_Call) Run(run func()) *File_Close_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *File_Close_Call) Return(_a0 error) *File_Close_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *File_Close_Call) RunAndReturn(run func() error) *File_Close_Call { + _c.Call.Return(run) + return _c +} + +// CopyToFile provides a mock function with given fields: file +func (_m *File) CopyToFile(file vfs.File) error { + ret := _m.Called(file) + + if len(ret) == 0 { + panic("no return value specified for CopyToFile") + } + + var r0 error + if rf, ok := ret.Get(0).(func(vfs.File) error); ok { + r0 = rf(file) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// File_CopyToFile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CopyToFile' +type File_CopyToFile_Call struct { + *mock.Call +} + +// CopyToFile is a helper method to define mock.On call +// - file vfs.File +func (_e *File_Expecter) CopyToFile(file interface{}) *File_CopyToFile_Call { + return &File_CopyToFile_Call{Call: _e.mock.On("CopyToFile", file)} +} + +func (_c *File_CopyToFile_Call) Run(run func(file vfs.File)) *File_CopyToFile_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(vfs.File)) + }) + return _c +} + +func (_c *File_CopyToFile_Call) Return(_a0 error) *File_CopyToFile_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *File_CopyToFile_Call) RunAndReturn(run func(vfs.File) error) *File_CopyToFile_Call { + _c.Call.Return(run) + return _c +} + +// CopyToLocation provides a mock function with given fields: location +func (_m *File) CopyToLocation(location vfs.Location) (vfs.File, error) { + ret := _m.Called(location) + + if len(ret) == 0 { + panic("no return value specified for CopyToLocation") + } + + var r0 vfs.File + var r1 error + if rf, ok := ret.Get(0).(func(vfs.Location) (vfs.File, error)); ok { + return rf(location) + } + if rf, ok := ret.Get(0).(func(vfs.Location) vfs.File); ok { + r0 = rf(location) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(vfs.File) + } + } + + if rf, ok := ret.Get(1).(func(vfs.Location) error); ok { + r1 = rf(location) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// File_CopyToLocation_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CopyToLocation' +type File_CopyToLocation_Call struct { + *mock.Call +} + +// CopyToLocation is a helper method to define mock.On call +// - location vfs.Location 
+func (_e *File_Expecter) CopyToLocation(location interface{}) *File_CopyToLocation_Call { + return &File_CopyToLocation_Call{Call: _e.mock.On("CopyToLocation", location)} +} + +func (_c *File_CopyToLocation_Call) Run(run func(location vfs.Location)) *File_CopyToLocation_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(vfs.Location)) + }) + return _c +} + +func (_c *File_CopyToLocation_Call) Return(_a0 vfs.File, _a1 error) *File_CopyToLocation_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *File_CopyToLocation_Call) RunAndReturn(run func(vfs.Location) (vfs.File, error)) *File_CopyToLocation_Call { + _c.Call.Return(run) + return _c +} + +// Delete provides a mock function with given fields: deleteOpts +func (_m *File) Delete(deleteOpts ...options.DeleteOption) error { + _va := make([]interface{}, len(deleteOpts)) + for _i := range deleteOpts { + _va[_i] = deleteOpts[_i] + } + var _ca []interface{} + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 error + if rf, ok := ret.Get(0).(func(...options.DeleteOption) error); ok { + r0 = rf(deleteOpts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// File_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete' +type File_Delete_Call struct { + *mock.Call +} + +// Delete is a helper method to define mock.On call +// - deleteOpts ...options.DeleteOption +func (_e *File_Expecter) Delete(deleteOpts ...interface{}) *File_Delete_Call { + return &File_Delete_Call{Call: _e.mock.On("Delete", + append([]interface{}{}, deleteOpts...)...)} +} + +func (_c *File_Delete_Call) Run(run func(deleteOpts ...options.DeleteOption)) *File_Delete_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]options.DeleteOption, len(args)-0) + for i, a := range args[0:] { + if a != nil { + variadicArgs[i] = a.(options.DeleteOption) + } + } + run(variadicArgs...) 
+ }) + return _c +} + +func (_c *File_Delete_Call) Return(_a0 error) *File_Delete_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *File_Delete_Call) RunAndReturn(run func(...options.DeleteOption) error) *File_Delete_Call { + _c.Call.Return(run) + return _c +} + +// Exists provides a mock function with given fields: +func (_m *File) Exists() (bool, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Exists") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func() (bool, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// File_Exists_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Exists' +type File_Exists_Call struct { + *mock.Call +} + +// Exists is a helper method to define mock.On call +func (_e *File_Expecter) Exists() *File_Exists_Call { + return &File_Exists_Call{Call: _e.mock.On("Exists")} +} + +func (_c *File_Exists_Call) Run(run func()) *File_Exists_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *File_Exists_Call) Return(_a0 bool, _a1 error) *File_Exists_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *File_Exists_Call) RunAndReturn(run func() (bool, error)) *File_Exists_Call { + _c.Call.Return(run) + return _c +} + +// LastModified provides a mock function with given fields: +func (_m *File) LastModified() (*time.Time, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LastModified") + } + + var r0 *time.Time + var r1 error + if rf, ok := ret.Get(0).(func() (*time.Time, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *time.Time); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*time.Time) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// File_LastModified_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LastModified' +type File_LastModified_Call struct { + *mock.Call +} + +// LastModified is a helper method to define mock.On call +func (_e *File_Expecter) LastModified() *File_LastModified_Call { + return &File_LastModified_Call{Call: _e.mock.On("LastModified")} +} + +func (_c *File_LastModified_Call) Run(run func()) *File_LastModified_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *File_LastModified_Call) Return(_a0 *time.Time, _a1 error) *File_LastModified_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *File_LastModified_Call) RunAndReturn(run func() (*time.Time, error)) *File_LastModified_Call { + _c.Call.Return(run) + return _c +} + +// Location provides a mock function with given fields: +func (_m *File) Location() vfs.Location { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Location") + } + + var r0 vfs.Location + if rf, ok := ret.Get(0).(func() vfs.Location); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(vfs.Location) + } + } + + return r0 +} + +// File_Location_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Location' +type File_Location_Call struct { + *mock.Call +} + +// Location is a helper method to define mock.On call +func (_e *File_Expecter) 
Location() *File_Location_Call { + return &File_Location_Call{Call: _e.mock.On("Location")} +} + +func (_c *File_Location_Call) Run(run func()) *File_Location_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *File_Location_Call) Return(_a0 vfs.Location) *File_Location_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *File_Location_Call) RunAndReturn(run func() vfs.Location) *File_Location_Call { + _c.Call.Return(run) + return _c +} + +// MoveToFile provides a mock function with given fields: file +func (_m *File) MoveToFile(file vfs.File) error { + ret := _m.Called(file) + + if len(ret) == 0 { + panic("no return value specified for MoveToFile") + } + + var r0 error + if rf, ok := ret.Get(0).(func(vfs.File) error); ok { + r0 = rf(file) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// File_MoveToFile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MoveToFile' +type File_MoveToFile_Call struct { + *mock.Call +} + +// MoveToFile is a helper method to define mock.On call +// - file vfs.File +func (_e *File_Expecter) MoveToFile(file interface{}) *File_MoveToFile_Call { + return &File_MoveToFile_Call{Call: _e.mock.On("MoveToFile", file)} +} + +func (_c *File_MoveToFile_Call) Run(run func(file vfs.File)) *File_MoveToFile_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(vfs.File)) + }) + return _c +} + +func (_c *File_MoveToFile_Call) Return(_a0 error) *File_MoveToFile_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *File_MoveToFile_Call) RunAndReturn(run func(vfs.File) error) *File_MoveToFile_Call { + _c.Call.Return(run) + return _c +} + +// MoveToLocation provides a mock function with given fields: location +func (_m *File) MoveToLocation(location vfs.Location) (vfs.File, error) { + ret := _m.Called(location) + + if len(ret) == 0 { + panic("no return value specified for MoveToLocation") + } + + var r0 vfs.File + var r1 error + if rf, ok := ret.Get(0).(func(vfs.Location) (vfs.File, error)); ok { + return rf(location) + } + if rf, ok := ret.Get(0).(func(vfs.Location) vfs.File); ok { + r0 = rf(location) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(vfs.File) + } + } + + if rf, ok := ret.Get(1).(func(vfs.Location) error); ok { + r1 = rf(location) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// File_MoveToLocation_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MoveToLocation' +type File_MoveToLocation_Call struct { + *mock.Call +} + +// MoveToLocation is a helper method to define mock.On call +// - location vfs.Location +func (_e *File_Expecter) MoveToLocation(location interface{}) *File_MoveToLocation_Call { + return &File_MoveToLocation_Call{Call: _e.mock.On("MoveToLocation", location)} +} + +func (_c *File_MoveToLocation_Call) Run(run func(location vfs.Location)) *File_MoveToLocation_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(vfs.Location)) + }) + return _c +} + +func (_c *File_MoveToLocation_Call) Return(_a0 vfs.File, _a1 error) *File_MoveToLocation_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *File_MoveToLocation_Call) RunAndReturn(run func(vfs.Location) (vfs.File, error)) *File_MoveToLocation_Call { + _c.Call.Return(run) + return _c +} + +// Name provides a mock function with given fields: +func (_m *File) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() 
string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// File_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name' +type File_Name_Call struct { + *mock.Call +} + +// Name is a helper method to define mock.On call +func (_e *File_Expecter) Name() *File_Name_Call { + return &File_Name_Call{Call: _e.mock.On("Name")} +} + +func (_c *File_Name_Call) Run(run func()) *File_Name_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *File_Name_Call) Return(_a0 string) *File_Name_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *File_Name_Call) RunAndReturn(run func() string) *File_Name_Call { + _c.Call.Return(run) + return _c +} + +// Path provides a mock function with given fields: +func (_m *File) Path() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Path") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// File_Path_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Path' +type File_Path_Call struct { + *mock.Call +} + +// Path is a helper method to define mock.On call +func (_e *File_Expecter) Path() *File_Path_Call { + return &File_Path_Call{Call: _e.mock.On("Path")} +} + +func (_c *File_Path_Call) Run(run func()) *File_Path_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *File_Path_Call) Return(_a0 string) *File_Path_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *File_Path_Call) RunAndReturn(run func() string) *File_Path_Call { + _c.Call.Return(run) + return _c +} + +// Read provides a mock function with given fields: p +func (_m *File) Read(p []byte) (int, error) { + ret := _m.Called(p) + + if len(ret) == 0 { + panic("no return value specified for Read") + } + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func([]byte) (int, error)); ok { + return rf(p) + } + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(p) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(p) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// File_Read_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Read' +type File_Read_Call struct { + *mock.Call +} + +// Read is a helper method to define mock.On call +// - p []byte +func (_e *File_Expecter) Read(p interface{}) *File_Read_Call { + return &File_Read_Call{Call: _e.mock.On("Read", p)} +} + +func (_c *File_Read_Call) Run(run func(p []byte)) *File_Read_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].([]byte)) + }) + return _c +} + +func (_c *File_Read_Call) Return(n int, err error) *File_Read_Call { + _c.Call.Return(n, err) + return _c +} + +func (_c *File_Read_Call) RunAndReturn(run func([]byte) (int, error)) *File_Read_Call { + _c.Call.Return(run) + return _c +} + +// Seek provides a mock function with given fields: offset, whence +func (_m *File) Seek(offset int64, whence int) (int64, error) { + ret := _m.Called(offset, whence) + + if len(ret) == 0 { + panic("no return value specified for Seek") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(int64, int) (int64, error)); ok { + return rf(offset, whence) + } + if rf, ok := ret.Get(0).(func(int64, int) int64); ok { + r0 = rf(offset, whence) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := 
ret.Get(1).(func(int64, int) error); ok { + r1 = rf(offset, whence) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// File_Seek_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Seek' +type File_Seek_Call struct { + *mock.Call +} + +// Seek is a helper method to define mock.On call +// - offset int64 +// - whence int +func (_e *File_Expecter) Seek(offset interface{}, whence interface{}) *File_Seek_Call { + return &File_Seek_Call{Call: _e.mock.On("Seek", offset, whence)} +} + +func (_c *File_Seek_Call) Run(run func(offset int64, whence int)) *File_Seek_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(int64), args[1].(int)) + }) + return _c +} + +func (_c *File_Seek_Call) Return(_a0 int64, _a1 error) *File_Seek_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *File_Seek_Call) RunAndReturn(run func(int64, int) (int64, error)) *File_Seek_Call { + _c.Call.Return(run) + return _c +} + +// Size provides a mock function with given fields: +func (_m *File) Size() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Size") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// File_Size_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Size' +type File_Size_Call struct { + *mock.Call +} + +// Size is a helper method to define mock.On call +func (_e *File_Expecter) Size() *File_Size_Call { + return &File_Size_Call{Call: _e.mock.On("Size")} +} + +func (_c *File_Size_Call) Run(run func()) *File_Size_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *File_Size_Call) Return(_a0 uint64, _a1 error) *File_Size_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *File_Size_Call) RunAndReturn(run func() (uint64, error)) *File_Size_Call { + _c.Call.Return(run) + return _c +} + +// String provides a mock function with given fields: +func (_m *File) String() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for String") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// File_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String' +type File_String_Call struct { + *mock.Call +} + +// String is a helper method to define mock.On call +func (_e *File_Expecter) String() *File_String_Call { + return &File_String_Call{Call: _e.mock.On("String")} +} + +func (_c *File_String_Call) Run(run func()) *File_String_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *File_String_Call) Return(_a0 string) *File_String_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *File_String_Call) RunAndReturn(run func() string) *File_String_Call { + _c.Call.Return(run) + return _c +} + +// Touch provides a mock function with given fields: +func (_m *File) Touch() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Touch") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// File_Touch_Call is a 
*mock.Call that shadows Run/Return methods with type explicit version for method 'Touch' +type File_Touch_Call struct { + *mock.Call +} + +// Touch is a helper method to define mock.On call +func (_e *File_Expecter) Touch() *File_Touch_Call { + return &File_Touch_Call{Call: _e.mock.On("Touch")} +} + +func (_c *File_Touch_Call) Run(run func()) *File_Touch_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *File_Touch_Call) Return(_a0 error) *File_Touch_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *File_Touch_Call) RunAndReturn(run func() error) *File_Touch_Call { + _c.Call.Return(run) + return _c +} + +// URI provides a mock function with given fields: +func (_m *File) URI() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for URI") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// File_URI_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'URI' +type File_URI_Call struct { + *mock.Call +} + +// URI is a helper method to define mock.On call +func (_e *File_Expecter) URI() *File_URI_Call { + return &File_URI_Call{Call: _e.mock.On("URI")} +} + +func (_c *File_URI_Call) Run(run func()) *File_URI_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *File_URI_Call) Return(_a0 string) *File_URI_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *File_URI_Call) RunAndReturn(run func() string) *File_URI_Call { + _c.Call.Return(run) + return _c +} + +// Write provides a mock function with given fields: p +func (_m *File) Write(p []byte) (int, error) { + ret := _m.Called(p) + + if len(ret) == 0 { + panic("no return value specified for Write") + } + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func([]byte) (int, error)); ok { + return rf(p) + } + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(p) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(p) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// File_Write_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Write' +type File_Write_Call struct { + *mock.Call +} + +// Write is a helper method to define mock.On call +// - p []byte +func (_e *File_Expecter) Write(p interface{}) *File_Write_Call { + return &File_Write_Call{Call: _e.mock.On("Write", p)} +} + +func (_c *File_Write_Call) Run(run func(p []byte)) *File_Write_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].([]byte)) + }) + return _c +} + +func (_c *File_Write_Call) Return(n int, err error) *File_Write_Call { + _c.Call.Return(n, err) + return _c +} + +func (_c *File_Write_Call) RunAndReturn(run func([]byte) (int, error)) *File_Write_Call { + _c.Call.Return(run) + return _c +} + +// NewFile creates a new instance of File. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
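+//
+// A hypothetical expecter-style usage in a test:
+//
+//	f := mocks.NewFile(t)
+//	f.EXPECT().Exists().Return(true, nil)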
+func NewFile(t interface { + mock.TestingT + Cleanup(func()) +}) *File { + mock := &File{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/vendor/github.com/c2fo/vfs/v6/mocks/FileSystem.go b/vendor/github.com/c2fo/vfs/v6/mocks/FileSystem.go new file mode 100644 index 000000000..929bdf916 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/mocks/FileSystem.go @@ -0,0 +1,290 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package mocks + +import ( + vfs "github.com/c2fo/vfs/v6" + mock "github.com/stretchr/testify/mock" +) + +// FileSystem is an autogenerated mock type for the FileSystem type +type FileSystem struct { + mock.Mock +} + +type FileSystem_Expecter struct { + mock *mock.Mock +} + +func (_m *FileSystem) EXPECT() *FileSystem_Expecter { + return &FileSystem_Expecter{mock: &_m.Mock} +} + +// Name provides a mock function with given fields: +func (_m *FileSystem) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// FileSystem_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name' +type FileSystem_Name_Call struct { + *mock.Call +} + +// Name is a helper method to define mock.On call +func (_e *FileSystem_Expecter) Name() *FileSystem_Name_Call { + return &FileSystem_Name_Call{Call: _e.mock.On("Name")} +} + +func (_c *FileSystem_Name_Call) Run(run func()) *FileSystem_Name_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *FileSystem_Name_Call) Return(_a0 string) *FileSystem_Name_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *FileSystem_Name_Call) RunAndReturn(run func() string) *FileSystem_Name_Call { + _c.Call.Return(run) + return _c +} + +// NewFile provides a mock function with given fields: volume, absFilePath +func (_m *FileSystem) NewFile(volume string, absFilePath string) (vfs.File, error) { + ret := _m.Called(volume, absFilePath) + + if len(ret) == 0 { + panic("no return value specified for NewFile") + } + + var r0 vfs.File + var r1 error + if rf, ok := ret.Get(0).(func(string, string) (vfs.File, error)); ok { + return rf(volume, absFilePath) + } + if rf, ok := ret.Get(0).(func(string, string) vfs.File); ok { + r0 = rf(volume, absFilePath) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(vfs.File) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(volume, absFilePath) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FileSystem_NewFile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewFile' +type FileSystem_NewFile_Call struct { + *mock.Call +} + +// NewFile is a helper method to define mock.On call +// - volume string +// - absFilePath string +func (_e *FileSystem_Expecter) NewFile(volume interface{}, absFilePath interface{}) *FileSystem_NewFile_Call { + return &FileSystem_NewFile_Call{Call: _e.mock.On("NewFile", volume, absFilePath)} +} + +func (_c *FileSystem_NewFile_Call) Run(run func(volume string, absFilePath string)) *FileSystem_NewFile_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string)) + }) + return _c +} + +func (_c *FileSystem_NewFile_Call) Return(_a0 vfs.File, _a1 error) *FileSystem_NewFile_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *FileSystem_NewFile_Call) 
RunAndReturn(run func(string, string) (vfs.File, error)) *FileSystem_NewFile_Call { + _c.Call.Return(run) + return _c +} + +// NewLocation provides a mock function with given fields: volume, absLocPath +func (_m *FileSystem) NewLocation(volume string, absLocPath string) (vfs.Location, error) { + ret := _m.Called(volume, absLocPath) + + if len(ret) == 0 { + panic("no return value specified for NewLocation") + } + + var r0 vfs.Location + var r1 error + if rf, ok := ret.Get(0).(func(string, string) (vfs.Location, error)); ok { + return rf(volume, absLocPath) + } + if rf, ok := ret.Get(0).(func(string, string) vfs.Location); ok { + r0 = rf(volume, absLocPath) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(vfs.Location) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(volume, absLocPath) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FileSystem_NewLocation_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewLocation' +type FileSystem_NewLocation_Call struct { + *mock.Call +} + +// NewLocation is a helper method to define mock.On call +// - volume string +// - absLocPath string +func (_e *FileSystem_Expecter) NewLocation(volume interface{}, absLocPath interface{}) *FileSystem_NewLocation_Call { + return &FileSystem_NewLocation_Call{Call: _e.mock.On("NewLocation", volume, absLocPath)} +} + +func (_c *FileSystem_NewLocation_Call) Run(run func(volume string, absLocPath string)) *FileSystem_NewLocation_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string), args[1].(string)) + }) + return _c +} + +func (_c *FileSystem_NewLocation_Call) Return(_a0 vfs.Location, _a1 error) *FileSystem_NewLocation_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *FileSystem_NewLocation_Call) RunAndReturn(run func(string, string) (vfs.Location, error)) *FileSystem_NewLocation_Call { + _c.Call.Return(run) + return _c +} + +// Retry provides a mock function with given fields: +func (_m *FileSystem) Retry() vfs.Retry { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Retry") + } + + var r0 vfs.Retry + if rf, ok := ret.Get(0).(func() vfs.Retry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(vfs.Retry) + } + } + + return r0 +} + +// FileSystem_Retry_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Retry' +type FileSystem_Retry_Call struct { + *mock.Call +} + +// Retry is a helper method to define mock.On call +func (_e *FileSystem_Expecter) Retry() *FileSystem_Retry_Call { + return &FileSystem_Retry_Call{Call: _e.mock.On("Retry")} +} + +func (_c *FileSystem_Retry_Call) Run(run func()) *FileSystem_Retry_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *FileSystem_Retry_Call) Return(_a0 vfs.Retry) *FileSystem_Retry_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *FileSystem_Retry_Call) RunAndReturn(run func() vfs.Retry) *FileSystem_Retry_Call { + _c.Call.Return(run) + return _c +} + +// Scheme provides a mock function with given fields: +func (_m *FileSystem) Scheme() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Scheme") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// FileSystem_Scheme_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Scheme' +type 
FileSystem_Scheme_Call struct { + *mock.Call +} + +// Scheme is a helper method to define mock.On call +func (_e *FileSystem_Expecter) Scheme() *FileSystem_Scheme_Call { + return &FileSystem_Scheme_Call{Call: _e.mock.On("Scheme")} +} + +func (_c *FileSystem_Scheme_Call) Run(run func()) *FileSystem_Scheme_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *FileSystem_Scheme_Call) Return(_a0 string) *FileSystem_Scheme_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *FileSystem_Scheme_Call) RunAndReturn(run func() string) *FileSystem_Scheme_Call { + _c.Call.Return(run) + return _c +} + +// NewFileSystem creates a new instance of FileSystem. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewFileSystem(t interface { + mock.TestingT + Cleanup(func()) +}) *FileSystem { + mock := &FileSystem{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/vendor/github.com/c2fo/vfs/v6/mocks/Location.go b/vendor/github.com/c2fo/vfs/v6/mocks/Location.go new file mode 100644 index 000000000..dd2c46bc4 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/mocks/Location.go @@ -0,0 +1,717 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package mocks + +import ( + options "github.com/c2fo/vfs/v6/options" + mock "github.com/stretchr/testify/mock" + + regexp "regexp" + + vfs "github.com/c2fo/vfs/v6" +) + +// Location is an autogenerated mock type for the Location type +type Location struct { + mock.Mock +} + +type Location_Expecter struct { + mock *mock.Mock +} + +func (_m *Location) EXPECT() *Location_Expecter { + return &Location_Expecter{mock: &_m.Mock} +} + +// ChangeDir provides a mock function with given fields: relLocPath +func (_m *Location) ChangeDir(relLocPath string) error { + ret := _m.Called(relLocPath) + + if len(ret) == 0 { + panic("no return value specified for ChangeDir") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(relLocPath) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Location_ChangeDir_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ChangeDir' +type Location_ChangeDir_Call struct { + *mock.Call +} + +// ChangeDir is a helper method to define mock.On call +// - relLocPath string +func (_e *Location_Expecter) ChangeDir(relLocPath interface{}) *Location_ChangeDir_Call { + return &Location_ChangeDir_Call{Call: _e.mock.On("ChangeDir", relLocPath)} +} + +func (_c *Location_ChangeDir_Call) Run(run func(relLocPath string)) *Location_ChangeDir_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *Location_ChangeDir_Call) Return(_a0 error) *Location_ChangeDir_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Location_ChangeDir_Call) RunAndReturn(run func(string) error) *Location_ChangeDir_Call { + _c.Call.Return(run) + return _c +} + +// DeleteFile provides a mock function with given fields: relFilePath, deleteOpts +func (_m *Location) DeleteFile(relFilePath string, deleteOpts ...options.DeleteOption) error { + _va := make([]interface{}, len(deleteOpts)) + for _i := range deleteOpts { + _va[_i] = deleteOpts[_i] + } + var _ca []interface{} + _ca = append(_ca, relFilePath) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for DeleteFile") + } + + var r0 error + if rf, ok := ret.Get(0).(func(string, ...options.DeleteOption) error); ok { + r0 = rf(relFilePath, deleteOpts...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Location_DeleteFile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteFile' +type Location_DeleteFile_Call struct { + *mock.Call +} + +// DeleteFile is a helper method to define mock.On call +// - relFilePath string +// - deleteOpts ...options.DeleteOption +func (_e *Location_Expecter) DeleteFile(relFilePath interface{}, deleteOpts ...interface{}) *Location_DeleteFile_Call { + return &Location_DeleteFile_Call{Call: _e.mock.On("DeleteFile", + append([]interface{}{relFilePath}, deleteOpts...)...)} +} + +func (_c *Location_DeleteFile_Call) Run(run func(relFilePath string, deleteOpts ...options.DeleteOption)) *Location_DeleteFile_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]options.DeleteOption, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(options.DeleteOption) + } + } + run(args[0].(string), variadicArgs...) + }) + return _c +} + +func (_c *Location_DeleteFile_Call) Return(_a0 error) *Location_DeleteFile_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Location_DeleteFile_Call) RunAndReturn(run func(string, ...options.DeleteOption) error) *Location_DeleteFile_Call { + _c.Call.Return(run) + return _c +} + +// Exists provides a mock function with given fields: +func (_m *Location) Exists() (bool, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Exists") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func() (bool, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Location_Exists_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Exists' +type Location_Exists_Call struct { + *mock.Call +} + +// Exists is a helper method to define mock.On call +func (_e *Location_Expecter) Exists() *Location_Exists_Call { + return &Location_Exists_Call{Call: _e.mock.On("Exists")} +} + +func (_c *Location_Exists_Call) Run(run func()) *Location_Exists_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Location_Exists_Call) Return(_a0 bool, _a1 error) *Location_Exists_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Location_Exists_Call) RunAndReturn(run func() (bool, error)) *Location_Exists_Call { + _c.Call.Return(run) + return _c +} + +// FileSystem provides a mock function with given fields: +func (_m *Location) FileSystem() vfs.FileSystem { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for FileSystem") + } + + var r0 vfs.FileSystem + if rf, ok := ret.Get(0).(func() vfs.FileSystem); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(vfs.FileSystem) + } + } + + return r0 +} + +// Location_FileSystem_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FileSystem' +type Location_FileSystem_Call struct { + *mock.Call +} + +// FileSystem is a helper method to define mock.On call +func (_e *Location_Expecter) FileSystem() *Location_FileSystem_Call { + return &Location_FileSystem_Call{Call: 
_e.mock.On("FileSystem")} +} + +func (_c *Location_FileSystem_Call) Run(run func()) *Location_FileSystem_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Location_FileSystem_Call) Return(_a0 vfs.FileSystem) *Location_FileSystem_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Location_FileSystem_Call) RunAndReturn(run func() vfs.FileSystem) *Location_FileSystem_Call { + _c.Call.Return(run) + return _c +} + +// List provides a mock function with given fields: +func (_m *Location) List() ([]string, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for List") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func() ([]string, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []string); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Location_List_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'List' +type Location_List_Call struct { + *mock.Call +} + +// List is a helper method to define mock.On call +func (_e *Location_Expecter) List() *Location_List_Call { + return &Location_List_Call{Call: _e.mock.On("List")} +} + +func (_c *Location_List_Call) Run(run func()) *Location_List_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Location_List_Call) Return(_a0 []string, _a1 error) *Location_List_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Location_List_Call) RunAndReturn(run func() ([]string, error)) *Location_List_Call { + _c.Call.Return(run) + return _c +} + +// ListByPrefix provides a mock function with given fields: prefix +func (_m *Location) ListByPrefix(prefix string) ([]string, error) { + ret := _m.Called(prefix) + + if len(ret) == 0 { + panic("no return value specified for ListByPrefix") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(string) ([]string, error)); ok { + return rf(prefix) + } + if rf, ok := ret.Get(0).(func(string) []string); ok { + r0 = rf(prefix) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(prefix) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Location_ListByPrefix_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListByPrefix' +type Location_ListByPrefix_Call struct { + *mock.Call +} + +// ListByPrefix is a helper method to define mock.On call +// - prefix string +func (_e *Location_Expecter) ListByPrefix(prefix interface{}) *Location_ListByPrefix_Call { + return &Location_ListByPrefix_Call{Call: _e.mock.On("ListByPrefix", prefix)} +} + +func (_c *Location_ListByPrefix_Call) Run(run func(prefix string)) *Location_ListByPrefix_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *Location_ListByPrefix_Call) Return(_a0 []string, _a1 error) *Location_ListByPrefix_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Location_ListByPrefix_Call) RunAndReturn(run func(string) ([]string, error)) *Location_ListByPrefix_Call { + _c.Call.Return(run) + return _c +} + +// ListByRegex provides a mock function with given fields: regex +func (_m *Location) ListByRegex(regex *regexp.Regexp) ([]string, error) { + ret := _m.Called(regex) + + if len(ret) == 0 { + panic("no return 
value specified for ListByRegex") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(*regexp.Regexp) ([]string, error)); ok { + return rf(regex) + } + if rf, ok := ret.Get(0).(func(*regexp.Regexp) []string); ok { + r0 = rf(regex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(*regexp.Regexp) error); ok { + r1 = rf(regex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Location_ListByRegex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListByRegex' +type Location_ListByRegex_Call struct { + *mock.Call +} + +// ListByRegex is a helper method to define mock.On call +// - regex *regexp.Regexp +func (_e *Location_Expecter) ListByRegex(regex interface{}) *Location_ListByRegex_Call { + return &Location_ListByRegex_Call{Call: _e.mock.On("ListByRegex", regex)} +} + +func (_c *Location_ListByRegex_Call) Run(run func(regex *regexp.Regexp)) *Location_ListByRegex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*regexp.Regexp)) + }) + return _c +} + +func (_c *Location_ListByRegex_Call) Return(_a0 []string, _a1 error) *Location_ListByRegex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Location_ListByRegex_Call) RunAndReturn(run func(*regexp.Regexp) ([]string, error)) *Location_ListByRegex_Call { + _c.Call.Return(run) + return _c +} + +// NewFile provides a mock function with given fields: relFilePath +func (_m *Location) NewFile(relFilePath string) (vfs.File, error) { + ret := _m.Called(relFilePath) + + if len(ret) == 0 { + panic("no return value specified for NewFile") + } + + var r0 vfs.File + var r1 error + if rf, ok := ret.Get(0).(func(string) (vfs.File, error)); ok { + return rf(relFilePath) + } + if rf, ok := ret.Get(0).(func(string) vfs.File); ok { + r0 = rf(relFilePath) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(vfs.File) + } + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(relFilePath) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Location_NewFile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewFile' +type Location_NewFile_Call struct { + *mock.Call +} + +// NewFile is a helper method to define mock.On call +// - relFilePath string +func (_e *Location_Expecter) NewFile(relFilePath interface{}) *Location_NewFile_Call { + return &Location_NewFile_Call{Call: _e.mock.On("NewFile", relFilePath)} +} + +func (_c *Location_NewFile_Call) Run(run func(relFilePath string)) *Location_NewFile_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *Location_NewFile_Call) Return(_a0 vfs.File, _a1 error) *Location_NewFile_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Location_NewFile_Call) RunAndReturn(run func(string) (vfs.File, error)) *Location_NewFile_Call { + _c.Call.Return(run) + return _c +} + +// NewLocation provides a mock function with given fields: relLocPath +func (_m *Location) NewLocation(relLocPath string) (vfs.Location, error) { + ret := _m.Called(relLocPath) + + if len(ret) == 0 { + panic("no return value specified for NewLocation") + } + + var r0 vfs.Location + var r1 error + if rf, ok := ret.Get(0).(func(string) (vfs.Location, error)); ok { + return rf(relLocPath) + } + if rf, ok := ret.Get(0).(func(string) vfs.Location); ok { + r0 = rf(relLocPath) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(vfs.Location) + } + } + + if rf, ok := ret.Get(1).(func(string) 
error); ok { + r1 = rf(relLocPath) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Location_NewLocation_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewLocation' +type Location_NewLocation_Call struct { + *mock.Call +} + +// NewLocation is a helper method to define mock.On call +// - relLocPath string +func (_e *Location_Expecter) NewLocation(relLocPath interface{}) *Location_NewLocation_Call { + return &Location_NewLocation_Call{Call: _e.mock.On("NewLocation", relLocPath)} +} + +func (_c *Location_NewLocation_Call) Run(run func(relLocPath string)) *Location_NewLocation_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *Location_NewLocation_Call) Return(_a0 vfs.Location, _a1 error) *Location_NewLocation_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Location_NewLocation_Call) RunAndReturn(run func(string) (vfs.Location, error)) *Location_NewLocation_Call { + _c.Call.Return(run) + return _c +} + +// Path provides a mock function with given fields: +func (_m *Location) Path() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Path") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Location_Path_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Path' +type Location_Path_Call struct { + *mock.Call +} + +// Path is a helper method to define mock.On call +func (_e *Location_Expecter) Path() *Location_Path_Call { + return &Location_Path_Call{Call: _e.mock.On("Path")} +} + +func (_c *Location_Path_Call) Run(run func()) *Location_Path_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Location_Path_Call) Return(_a0 string) *Location_Path_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Location_Path_Call) RunAndReturn(run func() string) *Location_Path_Call { + _c.Call.Return(run) + return _c +} + +// String provides a mock function with given fields: +func (_m *Location) String() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for String") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Location_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String' +type Location_String_Call struct { + *mock.Call +} + +// String is a helper method to define mock.On call +func (_e *Location_Expecter) String() *Location_String_Call { + return &Location_String_Call{Call: _e.mock.On("String")} +} + +func (_c *Location_String_Call) Run(run func()) *Location_String_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Location_String_Call) Return(_a0 string) *Location_String_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Location_String_Call) RunAndReturn(run func() string) *Location_String_Call { + _c.Call.Return(run) + return _c +} + +// URI provides a mock function with given fields: +func (_m *Location) URI() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for URI") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Location_URI_Call is a *mock.Call that shadows Run/Return methods with type 
explicit version for method 'URI' +type Location_URI_Call struct { + *mock.Call +} + +// URI is a helper method to define mock.On call +func (_e *Location_Expecter) URI() *Location_URI_Call { + return &Location_URI_Call{Call: _e.mock.On("URI")} +} + +func (_c *Location_URI_Call) Run(run func()) *Location_URI_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Location_URI_Call) Return(_a0 string) *Location_URI_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Location_URI_Call) RunAndReturn(run func() string) *Location_URI_Call { + _c.Call.Return(run) + return _c +} + +// Volume provides a mock function with given fields: +func (_m *Location) Volume() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Volume") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Location_Volume_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Volume' +type Location_Volume_Call struct { + *mock.Call +} + +// Volume is a helper method to define mock.On call +func (_e *Location_Expecter) Volume() *Location_Volume_Call { + return &Location_Volume_Call{Call: _e.mock.On("Volume")} +} + +func (_c *Location_Volume_Call) Run(run func()) *Location_Volume_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Location_Volume_Call) Return(_a0 string) *Location_Volume_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Location_Volume_Call) RunAndReturn(run func() string) *Location_Volume_Call { + _c.Call.Return(run) + return _c +} + +// NewLocation creates a new instance of Location. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewLocation(t interface { + mock.TestingT + Cleanup(func()) +}) *Location { + mock := &Location{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/vendor/github.com/c2fo/vfs/v6/mocks/Options.go b/vendor/github.com/c2fo/vfs/v6/mocks/Options.go new file mode 100644 index 000000000..54c5f5f5f --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/mocks/Options.go @@ -0,0 +1,32 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// Options is an autogenerated mock type for the Options type +type Options struct { + mock.Mock +} + +type Options_Expecter struct { + mock *mock.Mock +} + +func (_m *Options) EXPECT() *Options_Expecter { + return &Options_Expecter{mock: &_m.Mock} +} + +// NewOptions creates a new instance of Options. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewOptions(t interface { + mock.TestingT + Cleanup(func()) +}) *Options { + mock := &Options{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/vendor/github.com/c2fo/vfs/v6/mocks/Retry.go b/vendor/github.com/c2fo/vfs/v6/mocks/Retry.go new file mode 100644 index 000000000..fcbec6e57 --- /dev/null +++ b/vendor/github.com/c2fo/vfs/v6/mocks/Retry.go @@ -0,0 +1,78 @@ +// Code generated by mockery v2.43.2. DO NOT EDIT. 
+
+package mocks
+
+import mock "github.com/stretchr/testify/mock"
+
+// Retry is an autogenerated mock type for the Retry type
+type Retry struct {
+	mock.Mock
+}
+
+type Retry_Expecter struct {
+	mock *mock.Mock
+}
+
+func (_m *Retry) EXPECT() *Retry_Expecter {
+	return &Retry_Expecter{mock: &_m.Mock}
+}
+
+// Execute provides a mock function with given fields: wrapped
+func (_m *Retry) Execute(wrapped func() error) error {
+	ret := _m.Called(wrapped)
+
+	if len(ret) == 0 {
+		panic("no return value specified for Execute")
+	}
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(func() error) error); ok {
+		r0 = rf(wrapped)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// Retry_Execute_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Execute'
+type Retry_Execute_Call struct {
+	*mock.Call
+}
+
+// Execute is a helper method to define mock.On call
+// - wrapped func() error
+func (_e *Retry_Expecter) Execute(wrapped interface{}) *Retry_Execute_Call {
+	return &Retry_Execute_Call{Call: _e.mock.On("Execute", wrapped)}
+}
+
+func (_c *Retry_Execute_Call) Run(run func(wrapped func() error)) *Retry_Execute_Call {
+	_c.Call.Run(func(args mock.Arguments) {
+		run(args[0].(func() error))
+	})
+	return _c
+}
+
+func (_c *Retry_Execute_Call) Return(_a0 error) *Retry_Execute_Call {
+	_c.Call.Return(_a0)
+	return _c
+}
+
+func (_c *Retry_Execute_Call) RunAndReturn(run func(func() error) error) *Retry_Execute_Call {
+	_c.Call.Return(run)
+	return _c
+}
+
+// NewRetry creates a new instance of Retry. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewRetry(t interface {
+	mock.TestingT
+	Cleanup(func())
+}) *Retry {
+	mock := &Retry{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/mocks/StringFile.go b/vendor/github.com/c2fo/vfs/v6/mocks/StringFile.go
new file mode 100644
index 000000000..2e5273a3c
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/mocks/StringFile.go
@@ -0,0 +1,89 @@
+package mocks
+
+import (
+	"bytes"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/stretchr/testify/mock"
+)
+
+// NewStringFile creates a new ReadWriteFile instance whose contents can be read from the provided string.
+func NewStringFile(data, fileName string) *ReadWriteFile {
+	buffer := &bytes.Buffer{}
+	file := &ReadWriteFile{
+		File:          File{},
+		Reader:        strings.NewReader(data),
+		Writer:        buffer,
+		Buffer:        buffer,
+		ReaderContent: data,
+	}
+
+	// Set default expectations for file operations
+	file.On("Read", mock.Anything).Return(len(data), nil)
+	file.On("Write", mock.Anything).Return(len(data), nil)
+	file.On("Close").Return(nil)
+	file.On("Name").Return(fileName)
+
+	return file
+}
+
+// NewMockFromFilepath creates a new ReadWriteFile instance that can read a file from the provided path.
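+//
+// A usage sketch (the fixture path and buffer size are illustrative, not part of this package):
+//
+//	f := mocks.NewMockFromFilepath("testdata/sample.txt")
+//	buf := make([]byte, 8)
+//	n, err := f.Read(buf) // reads the file's contents via the embedded Reader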
+func NewMockFromFilepath(filePath string) *ReadWriteFile {
+	data, err := os.ReadFile(filePath)
+	if err != nil {
+		data = make([]byte, 0)
+	}
+	buffer := &bytes.Buffer{}
+	file := &ReadWriteFile{
+		File:          File{},
+		Reader:        strings.NewReader(string(data)),
+		Writer:        buffer,
+		Buffer:        buffer,
+		ReaderContent: string(data),
+	}
+
+	// Set default expectations for file operations
+	file.On("Read", mock.Anything).Return(len(data), nil)
+	file.On("Write", mock.Anything).Return(len(data), nil)
+	file.On("Close").Return(nil)
+	file.On("Name").Return(filepath.Base(filePath))
+
+	return file
+}
+
+// ReadWriteFile is a custom mock which allows the consumer to assign a custom reader and writer for
+// easily mocking file contents.
+type ReadWriteFile struct {
+	File
+	Reader        io.Reader
+	Writer        io.Writer
+	Buffer        *bytes.Buffer
+	ReaderContent string
+}
+
+// Read satisfies the io.Reader interface
+func (f *ReadWriteFile) Read(p []byte) (n int, err error) {
+	// Deal with mocks for potential assertions
+	n, err = f.File.Read(p)
+	if err != nil {
+		return
+	}
+	return f.Reader.Read(p)
+}
+
+// Write satisfies the io.Writer interface
+func (f *ReadWriteFile) Write(p []byte) (n int, err error) {
+	n, err = f.File.Write(p)
+	if err != nil {
+		return
+	}
+	return f.Writer.Write(p)
+}
+
+// Content returns the data held by the ReadWriteFile struct
+func (f *ReadWriteFile) Content() string {
+	return f.Buffer.String()
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/options/delete/deleteAllVersions.go b/vendor/github.com/c2fo/vfs/v6/options/delete/deleteAllVersions.go
new file mode 100644
index 000000000..2808d262a
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/options/delete/deleteAllVersions.go
@@ -0,0 +1,19 @@
+package delete
+
+import "github.com/c2fo/vfs/v6/options"
+
+const optionNameDeleteAllVersions = "deleteAllVersions"
+
+// WithDeleteAllVersions returns DeleteAllVersions implementation of delete.DeleteOption
+func WithDeleteAllVersions() options.DeleteOption {
+	return DeleteAllVersions{}
+}
+
+// DeleteAllVersions represents the DeleteOption that is used to remove all versions of files when deleted.
+// This will remove all versions of files for the filesystems that support file versioning.
+type DeleteAllVersions struct{}
+
+// DeleteOptionName returns the name of DeleteAllVersions option
+func (w DeleteAllVersions) DeleteOptionName() string {
+	return optionNameDeleteAllVersions
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/options/options.go b/vendor/github.com/c2fo/vfs/v6/options/options.go
new file mode 100644
index 000000000..558d14381
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/options/options.go
@@ -0,0 +1,18 @@
+package options
+
+// DeleteOption interface contains the function that should be implemented by any custom option to qualify as a delete option.
+// Example:
+// ```
+//
+//	type TakeBackupDeleteOption struct {
+//		backupLocation string
+//	}
+//
+//	func (o TakeBackupDeleteOption) DeleteOptionName() string {
+//		return "take backup"
+//	}
+//
+//	func (o TakeBackupDeleteOption) BackupLocation() string {
+//		return o.backupLocation
+//	}
+//
+// ```
+type DeleteOption interface {
+	DeleteOptionName() string
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/utils/authority.go b/vendor/github.com/c2fo/vfs/v6/utils/authority.go
new file mode 100644
index 000000000..4694bc269
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/utils/authority.go
@@ -0,0 +1,225 @@
+package utils
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+/*
+	URI parlance (see https://www.rfc-editor.org/rfc/rfc3986.html#section-3.2):
+
+	   foo://example.com:8042/over/there?name=ferret#nose
+	   \_/   \______________/\_________/ \_________/ \__/
+	    |           |            |            |        |
+	 scheme     authority       path        query   fragment
+
+	Where:
+		authority   = [ userinfo "@" ] host [ ":" port ]
+		userinfo    = *( unreserved / pct-encoded / sub-delims / ":" )
+		host        = IP-literal / IPv4address / reg-name
+		port        = *DIGIT
+		reg-name    = *( unreserved / pct-encoded / sub-delims )
+		unreserved  = ALPHA / DIGIT / "-" / "." / "_" / "~"
+		sub-delims  = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "="
+		pct-encoded = "%" HEXDIG HEXDIG
+*/
+
+// Authority represents host, port and userinfo (user/pass) in a URI
+type Authority struct {
+	host string
+	port uint16
+	url  *url.URL
+}
+
+// UserInfo represents user/pass portion of a URI
+type UserInfo struct {
+	url *url.URL
+}
+
+// Username returns the username of a URI UserInfo. May be an empty string.
+func (u UserInfo) Username() string {
+	return u.url.User.Username()
+}
+
+// Password returns the password of a URI UserInfo. May be an empty string.
+func (u UserInfo) Password() string {
+	p, _ := u.url.User.Password()
+	return p
+}
+
+// String() returns a string representation of authority. It does not include password per
+// https://tools.ietf.org/html/rfc3986#section-3.2.1
+//
+// Applications should not render as clear text any data after the first colon (":") character found within a userinfo
+// subcomponent unless the data after the colon is the empty string (indicating no password).
+func (a Authority) String() string {
+	authority := a.HostPortStr()
+	if a.UserInfo().Username() != "" {
+		authority = fmt.Sprintf("%s@%s", a.UserInfo().Username(), authority)
+	}
+	return authority
+}
+
+// UserInfo returns the userinfo section of authority. userinfo is username and password (deprecated).
+func (a Authority) UserInfo() UserInfo {
+	return UserInfo{
+		url: a.url,
+	}
+}
+
+// Host returns the host portion of an authority
+func (a Authority) Host() string {
+	return a.url.Hostname()
+}
+
+// Port returns the port portion of an authority
+func (a Authority) Port() uint16 {
+	return a.port
+}
+
+// HostPortStr returns a concatenated string of host and port from authority, separated by a colon, ie "host.com:1234"
+func (a Authority) HostPortStr() string {
+	if a.Port() != 0 {
+		return fmt.Sprintf("%s:%d", a.Host(), a.Port())
+	}
+	return a.Host()
+}
+
+var schemeRE = regexp.MustCompile("^[A-Za-z][A-Za-z0-9+.-]*://")
+
+// NewAuthority initializes Authority struct by parsing authority string.
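+//
+// A sketch of the expected behavior (host, port, and username are illustrative):
+//
+//	a, err := NewAuthority("user@example.com:8080")
+//	// a.Host() == "example.com", a.Port() == 8080
+//	// a.UserInfo().Username() == "user"
+//	// a.String() == "user@example.com:8080"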
+func NewAuthority(authority string) (Authority, error) {
+	if authority == "" {
+		return Authority{}, errors.New("authority string may not be empty")
+	}
+
+	var err error
+	matched := schemeRE.MatchString(authority)
+	if !matched {
+		authority = "scheme://" + authority
+	}
+
+	u, err := url.Parse(authority)
+	if err != nil {
+		return Authority{}, err
+	}
+
+	host, portStr := splitHostPort(u.Host)
+	var port uint16
+	if portStr != "" {
+		val, err := strconv.ParseUint(portStr, 10, 16)
+		if err != nil {
+			return Authority{}, err
+		}
+		port = uint16(val)
+	}
+
+	return Authority{
+		host: host,
+		port: port,
+		url:  u,
+	}, nil
+}
+
+// splitHostPort separates host and port. If the port is not valid, it returns
+// the entire input as host, and it doesn't check the validity of the host.
+// Unlike net.SplitHostPort, and per RFC 3986, it requires any port to be numeric.
+func splitHostPort(hostPort string) (host, port string) {
+	host = hostPort
+
+	colon := strings.LastIndexByte(host, ':')
+	if colon != -1 && validOptionalPort(host[colon:]) {
+		host, port = host[:colon], host[colon+1:]
+	}
+
+	if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
+		host = host[1 : len(host)-1]
+	}
+
+	return
+}
+
+// validOptionalPort reports whether port is either an empty string
+// or matches /^:\d*$/
+func validOptionalPort(port string) bool {
+	if port == "" {
+		return true
+	}
+	if port[0] != ':' {
+		return false
+	}
+	for _, b := range port[1:] {
+		if b < '0' || b > '9' {
+			return false
+		}
+	}
+	return true
+}
+
+// EncodeUserInfo takes an unencoded URI authority userinfo string and encodes it
+func EncodeUserInfo(rawUserInfo string) string {
+	parts := strings.SplitN(rawUserInfo, ":", 2)
+	encodedParts := make([]string, len(parts))
+	for i, part := range parts {
+		encoded := url.QueryEscape(part)
+		decoded := strings.NewReplacer(
+			"%21", "!", "%24", "$", "%26", "&", "%27", "'",
+			"%28", "(", "%29", ")", "%2A", "*", "%2B", "+",
+			"%2C", ",", "%3B", ";", "%3D", "=",
+		).Replace(encoded)
+		encodedParts[i] = decoded
+	}
+	return strings.Join(encodedParts, ":")
+}
+
+// EncodeAuthority takes an unencoded URI authority string and encodes it
+func EncodeAuthority(rawAuthority string) string {
+	var userInfo, hostPort string
+
+	// Split the authority into user info and hostPort
+	atIndex := strings.LastIndex(rawAuthority, "@")
+	if atIndex != -1 {
+		userInfo = rawAuthority[:atIndex]
+		hostPort = rawAuthority[atIndex+1:]
+	} else {
+		hostPort = rawAuthority
+	}
+
+	// Encode userInfo if present
+	if userInfo != "" {
+		userInfo = EncodeUserInfo(userInfo)
+	}
+
+	// Split host and port
+	var host, port string
+	hostPortSplit := strings.SplitN(hostPort, ":", 2)
+	if len(hostPortSplit) > 0 {
+		host = hostPortSplit[0]
+	}
+	if len(hostPortSplit) > 1 {
+		port = hostPortSplit[1]
+	}
+
+	// Encode host and port
+	encodedHost := url.QueryEscape(host)
+	var encodedPort string
+	if port != "" {
+		encodedPort = url.QueryEscape(port)
+	}
+
+	// Reconstruct the encoded authority string
+	var encodedAuthority string
+	if userInfo != "" {
+		encodedAuthority = userInfo + "@"
+	}
+	encodedAuthority += encodedHost
+	if encodedPort != "" {
+		encodedAuthority += ":" + encodedPort
+	}
+
+	return encodedAuthority
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/utils/errors.go b/vendor/github.com/c2fo/vfs/v6/utils/errors.go
new file mode 100644
index 000000000..9b31ff95b
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/utils/errors.go
@@ -0,0 +1,23 @@
+package utils
+
+import "fmt"
+
+// WrapReadError returns a wrapped read error
+func WrapReadError(err error) error {
+	return fmt.Errorf("read error: %w", err)
+}
+
+// WrapSeekError returns a wrapped seek error
+func WrapSeekError(err error) error {
+	return fmt.Errorf("seek error: %w", err)
+}
+
+// WrapWriteError returns a wrapped write error
+func WrapWriteError(err error) error {
+	return fmt.Errorf("write error: %w", err)
+}
+
+// WrapCloseError returns a wrapped close error
+func WrapCloseError(err error) error {
+	return fmt.Errorf("close error: %w", err)
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/utils/utils.go b/vendor/github.com/c2fo/vfs/v6/utils/utils.go
new file mode 100644
index 000000000..a19545833
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/utils/utils.go
@@ -0,0 +1,264 @@
+package utils
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/url"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/c2fo/vfs/v6"
+)
+
+const (
+	// ErrBadAbsFilePath constant is returned when a file path is not absolute
+	ErrBadAbsFilePath = "absolute file path is invalid - must include leading slash and may not include trailing slash"
+	// ErrBadRelFilePath constant is returned when a file path is not relative
+	ErrBadRelFilePath = "relative file path is invalid - may not include leading or trailing slashes"
+	// ErrBadAbsLocationPath constant is returned when a location path is not absolute
+	ErrBadAbsLocationPath = "absolute location path is invalid - must include leading and trailing slashes"
+	// ErrBadRelLocationPath constant is returned when a location path is not relative
+	ErrBadRelLocationPath = "relative location path is invalid - may not include leading slash but must include trailing slash"
+	// ErrBadPrefix constant is returned when a prefix is not relative, ends in /, or is empty
+	ErrBadPrefix = "prefix is invalid - may not include leading or trailing slashes and may not be empty"
+	// TouchCopyMinBufferSize is the minimum buffer size, in bytes, used by TouchCopyBuffered
+	TouchCopyMinBufferSize = 262144
+)
+
+// regex to test whether the last character is a '/'
+var hasTrailingSlash = regexp.MustCompile("/$")
+
+// regex to test whether the first character is a '/'
+var hasLeadingSlash = regexp.MustCompile("^/")
+
+// RemoveTrailingSlash removes trailing slash, if any
+func RemoveTrailingSlash(path string) string {
+	return strings.TrimRight(path, "/")
+}
+
+// RemoveLeadingSlash removes leading slash, if any
+func RemoveLeadingSlash(path string) string {
+	return strings.TrimLeft(path, "/")
+}
+
+// ValidateAbsoluteFilePath ensures that a file path has a leading slash but not a trailing slash
+func ValidateAbsoluteFilePath(name string) error {
+	if !strings.HasPrefix(name, "/") || strings.HasSuffix(name, "/") {
+		return errors.New(ErrBadAbsFilePath)
+	}
+	return nil
+}
+
+// ValidateRelativeFilePath ensures that a file path has neither leading nor trailing slashes
+func ValidateRelativeFilePath(name string) error {
+	if name == "" || name == "." || strings.HasPrefix(name, "/") || strings.HasSuffix(name, "/") {
+		return errors.New(ErrBadRelFilePath)
+	}
+	return nil
+}
+
+// ValidateAbsoluteLocationPath ensures that a location path has both leading and trailing slashes
+func ValidateAbsoluteLocationPath(name string) error {
+	if !strings.HasPrefix(name, "/") || !strings.HasSuffix(name, "/") {
+		return errors.New(ErrBadAbsLocationPath)
+	}
+	return nil
+}
+
+// ValidateRelativeLocationPath ensures that a location path has no leading slash but has a trailing slash
+func ValidateRelativeLocationPath(name string) error {
+	if strings.HasPrefix(name, "/") || !strings.HasSuffix(name, "/") {
+		return errors.New(ErrBadRelLocationPath)
+	}
+	return nil
+}
+
+// ValidatePrefix ensures that a prefix path has neither leading nor trailing slashes and
+// may not be empty but, unlike a relative file path, *may* be simply "."
+func ValidatePrefix(prefix string) error {
+	if prefix == "" || strings.HasPrefix(prefix, "/") || strings.HasSuffix(prefix, "/") {
+		return errors.New(ErrBadPrefix)
+	}
+	return nil
+}
+
+// GetFileURI returns a File URI
+func GetFileURI(f vfs.File) string {
+	return fmt.Sprintf("%s://%s%s", f.Location().FileSystem().Scheme(), f.Location().Volume(), f.Path())
+}
+
+// GetLocationURI returns a Location URI
+func GetLocationURI(l vfs.Location) string {
+	return fmt.Sprintf("%s://%s%s", l.FileSystem().Scheme(), l.Volume(), l.Path())
+}
+
+// EnsureTrailingSlash is like AddTrailingSlash but will only ever use / since it's used for web URIs, never a Windows OS path.
+func EnsureTrailingSlash(dir string) string {
+	if hasTrailingSlash.MatchString(dir) {
+		return dir
+	}
+	return dir + "/"
+}
+
+// EnsureLeadingSlash is like EnsureTrailingSlash except that it adds the leading slash if needed.
+func EnsureLeadingSlash(dir string) string {
+	if hasLeadingSlash.MatchString(dir) {
+		return dir
+	}
+	return "/" + dir
+}
+
+// PathToURI takes a relative or absolute path and returns an OS URI.
+// We assume a non-scheme path is an OS File or Location.
+// We assume volume (URI authority) is empty.
+// We assume relative paths are relative to the pwd (program's working directory)
+//
+//	/absolute/path/to/file.txt : file:///absolute/path/to/file.txt
+//	/some/absolute/path/       : file:///some/absolute/path/
+//	relative/path/to/file.txt  : file:///absolute/path/with/relative/path/to/file.txt
+//	relative/path/             : file:///absolute/path/with/relative/path/
+func PathToURI(p string) (string, error) {
+	if p == "" {
+		p = "/"
+	}
+	var URI string
+
+	// parse path
+	u, err := url.Parse(p)
+	if err != nil {
+		return "", err
+	}
+
+	// if a scheme is found, it's already a URI
+	if u.Scheme != "" {
+		return p, nil
+	}
+
+	// make absolute path (if not already)
+	absPath, err := filepath.Abs(p)
+	if err != nil {
+		return "", err
+	}
+
+	// Abs() strips trailing slashes so add back if original path had slash
+	if p[len(p)-1:] == "/" {
+		absPath = EnsureTrailingSlash(absPath)
+	}
+
+	URI = "file://" + absPath
+
+	return URI, err
+}
+
+// TouchCopy is a wrapper around io.Copy which ensures that even empty source files (reader) will get written as an
+// empty file. It guarantees a Write() call on the target file.
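+//
+// For example (a sketch): after TouchCopy(dst, src), dst holds src's bytes, or the result of a
+// single zero-length Write if src was empty.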
+// Deprecated: Use TouchCopyBuffered instead.
+func TouchCopy(writer io.Writer, reader io.Reader) error {
+	size, err := io.Copy(writer, reader)
+	if err != nil {
+		return err
+	}
+	if size == 0 {
+		_, err = writer.Write([]byte{})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// TouchCopyBuffered is a wrapper around io.CopyBuffer which ensures that even empty source files (reader) will get written
+// as an empty file. It guarantees a Write() call on the target file.
+// bufferSize is in bytes; if bufferSize <= 0, a buffer of TouchCopyMinBufferSize bytes is used, otherwise a buffer of
+// bufferSize bytes is used.
+func TouchCopyBuffered(writer io.Writer, reader io.Reader, bufferSize int) error {
+	var buffer []byte
+	var size int64
+	var err error
+
+	if bufferSize <= 0 {
+		// Use TouchCopyMinBufferSize
+		buffer = make([]byte, TouchCopyMinBufferSize)
+		size, err = io.CopyBuffer(writer, reader, buffer)
+	} else {
+		// Otherwise use provided bufferSize
+		buffer = make([]byte, bufferSize)
+		size, err = io.CopyBuffer(writer, reader, buffer)
+	}
+
+	if err != nil {
+		return err
+	}
+	if size == 0 {
+		_, err = writer.Write([]byte{})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// UpdateLastModifiedByMoving is used by some backends' Touch() method when a file already exists.
+func UpdateLastModifiedByMoving(file vfs.File) error {
+	// setup a tempfile
+	tempfile, err := file.Location().
+		NewFile(fmt.Sprintf("%s.%d", file.Name(), time.Now().UnixNano()))
+	if err != nil {
+		return err
+	}
+
+	// copy file to tempfile
+	err = file.CopyToFile(tempfile)
+	if err != nil {
+		return err
+	}
+
+	// move tempfile back to file
+	err = tempfile.MoveToFile(file)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// SeekTo is a helper function for Seek. It takes the current position, offset, whence, and length of the file
+// and returns the new position. It also checks for invalid offsets and returns an error if one is found.
+func SeekTo(length, position, offset int64, whence int) (int64, error) {
+	switch whence {
+	default:
+		return 0, vfs.ErrSeekInvalidWhence
+	case io.SeekStart:
+		// this actually does nothing since the new position just becomes the offset but is here for completeness
+	case io.SeekCurrent:
+		offset += position
+	case io.SeekEnd:
+		offset += length
+	}
+	if offset < 0 {
+		return 0, vfs.ErrSeekInvalidOffset
+	}
+
+	return offset, nil
+}
+
+// EncodeURI ensures that a uri is properly percent-encoded
+func EncodeURI(scheme, username, hostport, path string) string {
+	u := &url.URL{
+		Scheme: scheme,
+		User:   url.User(username),
+		Host:   hostport,
+		Path:   path,
+	}
+
+	return u.String()
+}
+
+// Ptr returns a pointer to the given value.
+func Ptr[T any](value T) *T {
+	return &value
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/vfs.go b/vendor/github.com/c2fo/vfs/v6/vfs.go
new file mode 100644
index 000000000..fad72bad5
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/vfs.go
@@ -0,0 +1,247 @@
+package vfs
+
+import (
+	"fmt"
+	"io"
+	"regexp"
+	"time"
+
+	"github.com/c2fo/vfs/v6/options"
+)
+
+// FileSystem represents a file system with any authentication accounted for.
+type FileSystem interface {
+	// NewFile initializes a File on the specified volume at path 'absFilePath'.
+	//
+	// * Accepts volume and an absolute file path.
+	// * Upon success, a vfs.File, representing the file's new path (location path + file relative path), will be returned.
+	// * On error, nil is returned for the file.
+	// * Note that not all file systems will have a "volume" and will therefore be "":
+	//       file:///path/to/file has a volume of "" and name /path/to/file
+	//     whereas
+	//       s3://mybucket/path/to/file has a volume of "mybucket" and name /path/to/file
+	// * The file may or may not already exist.
+	NewFile(volume string, absFilePath string) (File, error)
+
+	// NewLocation initializes a Location on the specified volume with the given path.
+	//
+	// * Accepts volume and an absolute location path.
+	// * The file may or may not already exist. Note that on key-store file systems like S3 or GCS, paths never truly exist.
+	// * On error, nil is returned for the location.
+	//
+	// See NewFile for note on volume.
+	NewLocation(volume string, absLocPath string) (Location, error)
+
+	// Name returns the name of the FileSystem ie: Amazon S3, os, Google Cloud Storage, etc.
+	Name() string
+
+	// Scheme returns the uri scheme used by the FileSystem: s3, file, gs, etc.
+	Scheme() string
+
+	// Retry will return the retry function to be used by any file system.
+	Retry() Retry
+}
+
+// Location represents a file system path which serves as a start point for directory-like functionality. A location may
+// or may not actually exist on the file system.
+type Location interface {
+	// String returns the fully qualified absolute URI for the Location. IE, file://bucket/some/path/
+	fmt.Stringer
+
+	// List returns a slice of strings representing the base names of the files found at the Location.
+	//
+	// * All implementations are expected to return ([]string{}, nil) in the case of a non-existent directory/prefix/location.
+	// * If the user cares about the distinction between an empty location and a non-existent one, Location.Exists() should
+	//   be checked first.
+	List() ([]string, error)
+
+	// ListByPrefix returns a slice of strings representing the base names of the files found in Location whose filenames
+	// match the given prefix.
+	//
+	// * All implementations are expected to return ([]string{}, nil) in the case of a non-existent directory/prefix/location.
+	// * "relative" prefixes are allowed, ie, listByPrefix from "/some/path/" with prefix "to/somepattern" is the same as
+	//   location "/some/path/to/" with prefix of "somepattern"
+	// * If the user cares about the distinction between an empty location and a non-existent one, Location.Exists() should
+	//   be checked first.
+	ListByPrefix(prefix string) ([]string, error)
+
+	// ListByRegex returns a slice of strings representing the base names of the files found in Location that matched the
+	// given regular expression.
+	//
+	// * All implementations are expected to return ([]string{}, nil) in the case of a non-existent directory/prefix/location.
+	// * If the user cares about the distinction between an empty location and a non-existent one, Location.Exists() should
+	//   be checked first.
+	ListByRegex(regex *regexp.Regexp) ([]string, error)
+
+	// Volume returns the volume as string. In URI parlance, volume equates to authority.
+	// For example s3://mybucket/path/to/file.txt, volume would return "mybucket".
+	//
+	// Note: Some file systems may not have a volume and will return "".
+	Volume() string
+
+	// Path returns the absolute location path, ie /some/path/to/. An absolute path must be resolved to its shortest path:
+	// see path.Clean
+	Path() string
+
+	// Exists returns boolean if the location exists on the file system. Returns an error, if any.
+	Exists() (bool, error)
+
+	// NewLocation is an initializer for a new Location relative to the existing one.
+	//
+	// Given location:
+	//     loc := fs.NewLocation("s3://mybucket/some/path/to/")
+	// calling:
+	//     newLoc := loc.NewLocation("../../")
+	// would return a new vfs.Location representing:
+	//     s3://mybucket/some/
+	//
+	// * Accepts a relative location path.
+	NewLocation(relLocPath string) (Location, error)
+
+	// ChangeDir updates the existing Location's path to the provided relative location path.
+	//
+	// Given location:
+	//     loc := fs.NewLocation("file:///some/path/to/")
+	// calling:
+	//     loc.ChangeDir("../../")
+	// would update the current location instance to
+	//     file:///some/.
+	//
+	// * ChangeDir accepts a relative location path.
+	ChangeDir(relLocPath string) error
+
+	// FileSystem returns the underlying vfs.FileSystem struct for Location.
+	FileSystem() FileSystem
+
+	// NewFile will instantiate a vfs.File instance at or relative to the current location's path.
+	//
+	// * Accepts a relative file path.
+	// * In the case of an error, nil is returned for the file.
+	// * Resultant File path will be the shortest path name equivalent of combining the Location path and relative path, if any.
+	//   ie, /tmp/dir1/ as location and relFilePath "newdir/./../newerdir/file.txt"
+	//   results in /tmp/dir1/newerdir/file.txt for the final vfs.File path.
+	// * Upon success, a vfs.File, representing the file's new path (location path + file relative path), will be returned.
+	// * The file may or may not already exist.
+	NewFile(relFilePath string) (File, error)
+
+	// DeleteFile deletes the file of the given name at the location.
+	//
+	// This is meant to be a shortcut for instantiating a new file and calling delete on that, with all the necessary
+	// error handling overhead.
+	//
+	// * Accepts relative file path.
+	DeleteFile(relFilePath string, deleteOpts ...options.DeleteOption) error
+
+	// URI returns the fully qualified absolute URI for the Location. IE, s3://bucket/some/path/
+	//
+	// URI's for locations must always end with a slash.
+	URI() string
+}
+
+// File represents a file on a file system. A File may or may not actually exist on the file system.
+type File interface {
+	io.Closer
+	io.Reader
+	io.Seeker
+	io.Writer
+	fmt.Stringer
+
+	// Exists returns boolean if the file exists on the file system. Returns an error, if any.
+	Exists() (bool, error)
+
+	// Location returns the vfs.Location for the File.
+	Location() Location
+
+	// CopyToLocation will copy the current file to the provided location.
+	//
+	// * Upon success, a vfs.File, representing the file at the new location, will be returned.
+	// * In the case of an error, nil is returned for the file.
+	// * CopyToLocation should use native functions when possible within the same scheme.
+	// * If the file already exists at the location, the contents will be overwritten with the current file's contents.
+	// * Unless Seek position is at 0,0 a vfs.CopyToNotPossible will be returned
+	// * CopyToLocation will Close both the source and target Files which therefore can't be appended to without first
+	//   calling Seek() to move the cursor to the end of the file.
+	CopyToLocation(location Location) (File, error)
+
+	// CopyToFile will copy the current file to the provided file instance.
+	//
+	// * In the case of an error, nil is returned for the file.
+	// * CopyToFile should use native functions when possible within the same scheme.
+	// * If the file already exists, the contents will be overwritten with the current file's contents.
+	// * Unless Seek position is at 0,0 a vfs.CopyToNotPossible will be returned
+	// * CopyToFile will Close both the source and target Files which therefore can't be appended to without first
+	//   calling Seek() to move the cursor to the end of the file.
+	CopyToFile(file File) error
+
+	// MoveToLocation will move the current file to the provided location.
+	//
+	// * If the file already exists at the location, the contents will be overwritten with the current file's contents.
+	// * If the location does not exist, an attempt will be made to create it.
+	// * Upon success, a vfs.File, representing the file at the new location, will be returned.
+	// * In the case of an error, nil is returned for the file.
+	// * When moving within the same Scheme, native move/rename should be used where possible.
+	// * Unless Seek position is at 0,0 a vfs.CopyToNotPossible will be returned
+	// * MoveToLocation will Close both the source and target Files which therefore can't be appended to without first
+	//   calling Seek() to move the cursor to the end of the file.
+	MoveToLocation(location Location) (File, error)
+
+	// MoveToFile will move the current file to the provided file instance.
+	//
+	// * If the file already exists, the contents will be overwritten with the current file's contents.
+	// * Unless Seek position is at 0,0 a vfs.CopyToNotPossible will be returned
+	// * The current instance of the file will be removed.
+	// * MoveToFile will Close both the source and target Files which therefore can't be appended to without first
+	//   calling Seek() to move the cursor to the end of the file.
+	MoveToFile(file File) error
+
+	// Delete unlinks the File on the file system.
+	Delete(deleteOpts ...options.DeleteOption) error
+
+	// LastModified returns the timestamp the file was last modified (as *time.Time).
+	LastModified() (*time.Time, error)
+
+	// Size returns the size of the file in bytes.
+	Size() (uint64, error)
+
+	// Path returns absolute path, including filename, ie /some/path/to/file.txt
+	//
+	// If the directory portion of a file is desired, call
+	//   someFile.Location().Path()
+	Path() string
+
+	// Name returns the base name of the file path.
+	//
+	// For file:///some/path/to/file.txt, it would return file.txt
+	Name() string
+
+	// Touch creates a zero-length file on the vfs.File if no File exists. Otherwise, it updates the File's last
+	// modified timestamp. Returns an error if unable to touch the File.
+	Touch() error
+
+	// URI returns the fully qualified absolute URI for the File. IE, s3://bucket/some/path/to/file.txt
+	URI() string
+}
+
+// Options are structs that contain various options specific to the file system
+type Options interface{}
+
+// Retry is a function that can be used to wrap any operation into a definable retry operation. The wrapped argument
+// is called by the underlying VFS implementation.
+//
+// Ex:
+//
+//	var retrier Retry = func(wrapped func() error) error {
+//		var ret error
+//		for i := 0; i < 5; i++ {
+//			if err := wrapped(); err != nil { ret = err; continue }
+//			return nil
+//		}
+//		return ret
+//	}
type Retry func(wrapped func() error) error
+
+// DefaultRetryer returns a no-op retryer which simply calls the wrapped command without looping.
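+// A bounded retrier can be substituted by a FileSystem implementation; for example, a sketch
+// (not part of this package) that gives up after three attempts:
+//
+//	var threeTries Retry = func(wrapped func() error) error {
+//		var err error
+//		for i := 0; i < 3; i++ {
+//			if err = wrapped(); err == nil {
+//				return nil
+//			}
+//		}
+//		return err
++//	}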
+func DefaultRetryer() Retry {
+	return func(c func() error) error { return c() }
+}
diff --git a/vendor/github.com/c2fo/vfs/v6/vfssimple/doc.go b/vendor/github.com/c2fo/vfs/v6/vfssimple/doc.go
new file mode 100644
index 000000000..ef77dd8dd
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/vfssimple/doc.go
@@ -0,0 +1,114 @@
+/*
+Package vfssimple provides a basic, easy-to-use set of functions for any supported backend file system, using full URIs:
+  - Local OS: file:///some/path/to/file.txt
+  - Amazon S3: s3://mybucket/path/to/file.txt
+  - Google Cloud Storage: gs://mybucket/path/to/file.txt
+
+# Usage
+
+Just import vfssimple.
+
+	package main
+
+	import (
+		"fmt"
+
+		"github.com/c2fo/vfs/v6/vfssimple"
+	)
+
+	func main() {
+		myLocalDir, err := vfssimple.NewLocation("file:///tmp/")
+		if err != nil {
+			panic(err)
+		}
+
+		myS3File, err := vfssimple.NewFile("s3://mybucket/some/path/to/key.txt")
+		if err != nil {
+			panic(err)
+		}
+
+		localFile, err := myS3File.MoveToLocation(myLocalDir)
+		if err != nil {
+			panic(err)
+		}
+
+		fmt.Printf("moved %s to %s\n", myS3File, localFile)
+	}
+
+# Authentication and Options
+
+vfssimple is largely an example of how to initialize a set of backend file systems. It only provides a default
+initialization of the individual file systems. See the backend docs for specific authentication info for each backend,
+but generally speaking, most backends can use environment variables to set credentials or client options.
+
+File systems can only use one set of options. If you would like to configure more than one file system of the same
+type/scheme with separate credentials, you can register and map file system options to locations or individual objects.
+The vfssimple library will automatically try to resolve the provided URI in NewFile() or NewLocation() to the registered
+file system.
+
+	package main
+
+	import (
+		"fmt"
+
+		"github.com/c2fo/vfs/v6/backend"
+		"github.com/c2fo/vfs/v6/backend/s3"
+		"github.com/c2fo/vfs/v6/vfssimple"
+	)
+
+	func main() {
+		bucketAuth := s3.NewFileSystem().WithOptions(s3.Options{
+			AccessKeyID:     "key1",
+			SecretAccessKey: "secret1",
+			Region:          "us-west-2",
+		})
+
+		fileAuth := s3.NewFileSystem().WithOptions(s3.Options{
+			AccessKeyID:     "key2",
+			SecretAccessKey: "secret2",
+			Region:          "us-west-2",
+		})
+
+		backend.Register("s3://bucket1/", bucketAuth)
+		backend.Register("s3://bucket2/file.txt", fileAuth)
+
+		secureFile, _ := vfssimple.NewFile("s3://bucket2/file.txt")
+		publicLocation, _ := vfssimple.NewLocation("s3://bucket1/")
+
+		secureFile.CopyToLocation(publicLocation)
+
+		fmt.Printf("copied %s to %s\n", secureFile, publicLocation)
+	}
+
+# Registered Backend Resolution
+
+Every backend type automatically registers itself as an available backend filesystem for vfssimple based on its scheme. In this way,
+vfssimple is able to determine which backend to use for any related URI. As mentioned above, you can register your own initialized
+filesystem as well.
+
+vfssimple resolves backends by doing a prefix match of the URI to the registered backend names, choosing the longest (most specific)
+matching backend filesystem.
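+
+In code, that resolution looks roughly like this (a sketch; bucket names and options are illustrative):
+
+	backend.Register("s3://bucket-a/", s3.NewFileSystem().WithOptions(optsA))
+
+	f, _ := vfssimple.NewFile("s3://bucket-a/path/file.txt") // resolves to the "s3://bucket-a/" backend
+	g, _ := vfssimple.NewFile("s3://bucket-b/path/file.txt") // only the default "s3" backend matches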
+
+For instance, given registered backends with the names:
+
+	's3'                         - registered by default
+	's3://somebucket/'           - perhaps this was registered using AWS access key id x
+	's3://somebucket/path/'      - and this was registered using AWS access key id y
+	's3://somebucket/path/a.txt' - and this was registered using AWS access key id z
+	's3://some'                  - another contrived registered fs that matches any bucket whose name begins with "some"
+
+See which registered backend each URI resolves to:
+
+	's3://somebucket/path/a.txt' - URI: 's3://somebucket/path/a.txt'         (most specific match)
+	's3://somebucket/path/a.txt' - URI: 's3://somebucket/path/a.txt.tar.gz'  (prefix still matches)
+	's3://somebucket/path/'      - URI: 's3://somebucket/path/otherfile.txt' (file only matches path-level registered fs)
+	's3://somebucket/path/'      - URI: 's3://somebucket/path/'              (exact path-level match)
+	's3://somebucket/'           - URI: 's3://somebucket/test/file.txt'      (bucket-level match only)
+	's3://somebucket/'           - URI: 's3://somebucket/test/'              (still bucket-level match only)
+	's3://somebucket/'           - URI: 's3://somebucket/'                   (exact bucket-level match)
+	's3://some'                  - URI: 's3://some-other-bucket/'            (bucket-level match)
+	's3'                         - URI: 's3://other/'                        (scheme-level match only)
+	's3'                         - URI: 's3://other/file.txt'                (scheme-level match only)
+	's3'                         - URI: 's3://other/path/to/nowhere/'        (scheme-level match only)
+*/
+package vfssimple
diff --git a/vendor/github.com/c2fo/vfs/v6/vfssimple/vfssimple.go b/vendor/github.com/c2fo/vfs/v6/vfssimple/vfssimple.go
new file mode 100644
index 000000000..a49a54a32
--- /dev/null
+++ b/vendor/github.com/c2fo/vfs/v6/vfssimple/vfssimple.go
@@ -0,0 +1,123 @@
+package vfssimple
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/c2fo/vfs/v6"
+	"github.com/c2fo/vfs/v6/backend"
+	_ "github.com/c2fo/vfs/v6/backend/all" // register all backends
+	"github.com/c2fo/vfs/v6/backend/azure"
+	"github.com/c2fo/vfs/v6/backend/mem"
+	"github.com/c2fo/vfs/v6/backend/os"
+)
+
+var (
+	// ErrMissingAuthority is returned when the authority is missing from a URI
+	ErrMissingAuthority = errors.New("unable to determine uri authority ([user@]host[:port]) for network-based scheme")
+	// ErrMissingScheme is returned when the scheme is missing from a URI
+	ErrMissingScheme = errors.New("unable to determine uri scheme")
+	// ErrRegFsNotFound is returned when no registered backend matches the URI
+	ErrRegFsNotFound = errors.New("no matching registered filesystem found")
+	// ErrBlankURI is returned when the URI is blank
+	ErrBlankURI = errors.New("uri is blank")
+)
+
+// NewLocation is a convenience function that allows for instantiating a location based on a uri string. Any
+// backend file system is supported, though some may require prior configuration. See the docs for
+// specific requirements of each.
+func NewLocation(uri string) (vfs.Location, error) {
+	fs, host, path, err := parseSupportedURI(uri)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create vfs.Location for uri %q: %w", uri, err)
+	}
+
+	return fs.NewLocation(host, path)
+}
+
+// NewFile is a convenience function that allows for instantiating a file based on a uri string. Any
+// backend file system is supported, though some may require prior configuration. See the docs for
+// specific requirements of each.
+func NewFile(uri string) (vfs.File, error) {
+	fs, host, path, err := parseSupportedURI(uri)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create vfs.File for uri %q: %w", uri, err)
+	}
+
+	return fs.NewFile(host, path)
+}
+
+// parseURI attempts to parse a URI and validates that it contains the required parts
+func parseURI(uri string) (scheme, authority, path string, err error) {
+	// return early if blank uri
+	if uri == "" {
+		err = ErrBlankURI
+		return
+	}
+
+	// parse URI
+	var u *url.URL
+	u, err = url.Parse(uri)
+	if err != nil {
+		err = fmt.Errorf("unknown url.Parse error: %w", err)
+		return
+	}
+
+	// validate scheme
+	scheme = u.Scheme
+	if u.Scheme == "" {
+		err = ErrMissingScheme
+		return
+	}
+
+	// validate authority
+	authority = u.Host
+	path = u.Path
+	if azure.IsValidURI(u) {
+		authority, path, err = azure.ParsePath(path)
+	}
+
+	if u.User.String() != "" {
+		authority = fmt.Sprintf("%s@%s", u.User, u.Host)
+	}
+	// network-based schemes require an authority, but file:// and mem:// do not
+	if authority == "" && !(scheme == os.Scheme || scheme == mem.Scheme) {
+		return "", "", "", ErrMissingAuthority
+	}
+
+	return
+}
+
+// parseSupportedURI checks if the URI matches any backend name as a prefix, capturing the longest (most specific) match found.
+// See the Registered Backend Resolution section in doc.go for examples.
+func parseSupportedURI(uri string) (vfs.FileSystem, string, string, error) {
+	_, authority, path, err := parseURI(uri)
+	if err != nil {
+		return nil, "", "", err
+	}
+
+	var longest string
+	backends := backend.RegisteredBackends()
+	for _, backendName := range backends {
+		if strings.HasPrefix(uri, backendName) {
+			// The first match always becomes the longest
+			if longest == "" {
+				longest = backendName
+				continue
+			}
+
+			// we found a longer (more specific) backend prefix matching the URI
+			if len(backendName) > len(longest) {
+				longest = backendName
+			}
+		}
+	}
+
+	if longest == "" {
+		err = ErrRegFsNotFound
+	}
+
+	return backend.Backend(longest), authority, path, err
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/.gitignore b/vendor/github.com/golang-jwt/jwt/v4/.gitignore
new file mode 100644
index 000000000..09573e016
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/.gitignore
@@ -0,0 +1,4 @@
+.DS_Store
+bin
+.idea/
+
diff --git a/vendor/github.com/golang-jwt/jwt/v4/LICENSE b/vendor/github.com/golang-jwt/jwt/v4/LICENSE
new file mode 100644
index 000000000..35dbc2520
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) 2012 Dave Grijalva
+Copyright (c) 2021 golang-jwt maintainers
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md
new file mode 100644
index 000000000..32966f598
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md
@@ -0,0 +1,22 @@
+## Migration Guide (v4.0.0)
+
+Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0), the import path will be:
+
+    "github.com/golang-jwt/jwt/v4"
+
+The `/v4` version will be backwards compatible with existing `v3.x.y` tags in this repo, as well as
+`github.com/dgrijalva/jwt-go`. For most users this should be a drop-in replacement; if you're having
+trouble migrating, please open an issue.
+
+You can replace all occurrences of `github.com/dgrijalva/jwt-go` or `github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually or by using tools such as `sed` or `gofmt`.
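+
+For example, with GNU sed (a sketch; adjust the file set to match your repository layout):
+
+```sh
+find . -name '*.go' -exec sed -i 's|github.com/dgrijalva/jwt-go|github.com/golang-jwt/jwt/v4|g' {} +
+```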
+
+And then you'd typically run:
+
+```
+go get github.com/golang-jwt/jwt/v4
+go mod tidy
+```
+
+## Older releases (before v3.2.0)
+
+The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md.
diff --git a/vendor/github.com/golang-jwt/jwt/v4/README.md b/vendor/github.com/golang-jwt/jwt/v4/README.md
new file mode 100644
index 000000000..30f2f2a6f
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/README.md
@@ -0,0 +1,138 @@
+# jwt-go
+
+[![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml)
+[![Go Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt/v4.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt/v4)
+
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
+
+Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0) this project adds Go module support, but maintains backwards compatibility with older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`.
+See the [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information.
+
+> After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic.
+
+
+**SECURITY NOTICE:** Some older versions of Go have a security issue in crypto/elliptic. The recommendation is to upgrade to at least Go 1.15. See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail.
+
+**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types to match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
+
+### Supported Go versions
+
+Our support of Go versions is aligned with Go's [version release policy](https://golang.org/doc/devel/release#policy).
+So we will support a major version of Go until there are two newer major releases.
+We no longer support building jwt-go with unsupported Go versions, as these contain security
+vulnerabilities which will not be fixed.
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
+
+In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which signing method was used and what key was used.
+
+The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
+
+## Installation Guidelines
+
+1. To install the jwt package, you first need to have [Go](https://go.dev/doc/install) installed, then you can use the command below to add `jwt-go` as a dependency in your Go program.
+
+```sh
+go get -u github.com/golang-jwt/jwt/v4
+```
+
+2. Import it in your code:
+
+```go
+import "github.com/golang-jwt/jwt/v4"
+```
+
+## Examples
+
+See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) for examples of usage:
+
+* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-Parse-Hmac)
+* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-New-Hmac)
+* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#pkg-examples)
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods or key functions. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`, or provide a `jwt.Keyfunc`; a minimal sketch of a custom method follows the table below.
+
+A common use case would be integrating with different 3rd party signature providers, like key management services from various cloud providers or Hardware Security Modules (HSMs), or to implement additional standards.
+
+| Extension | Purpose                                                                                                  | Repo                                       |
+| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
+| GCP       | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS)               | https://github.com/someone1/gcp-jwt-go     |
+| AWS       | Integrates with AWS Key Management Service, KMS                                                            | https://github.com/matelang/jwt-go-aws-kms |
+| JWKS      | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc`   | https://github.com/MicahParks/keyfunc      |
+
+*Disclaimer*: Unless otherwise specified, these integrations are maintained by third parties and should not be considered as a primary offer by any of the mentioned cloud providers.
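+
+As an illustration, here is a minimal sketch (not part of this library) of a custom method that simply delegates to HS256 under a custom `alg` name; `SigningMethodLoud`, `LOUD256`, and the package name are hypothetical, for demonstration only:
+
+```go
+package custommethod
+
+import "github.com/golang-jwt/jwt/v4"
+
+// SigningMethodLoud delegates signing and verification to HS256 but
+// advertises a custom alg name in the token header.
+type SigningMethodLoud struct{ inner *jwt.SigningMethodHMAC }
+
+func (m *SigningMethodLoud) Alg() string { return "LOUD256" }
+
+func (m *SigningMethodLoud) Sign(signingString string, key interface{}) (string, error) {
+	return m.inner.Sign(signingString, key)
+}
+
+func (m *SigningMethodLoud) Verify(signingString, signature string, key interface{}) error {
+	return m.inner.Verify(signingString, signature, key)
+}
+
+func init() {
+	// Register a factory so parsers can resolve tokens whose header says alg=LOUD256.
+	jwt.RegisterSigningMethod("LOUD256", func() jwt.SigningMethod {
+		return &SigningMethodLoud{inner: jwt.SigningMethodHS256}
+	})
+}
+```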
+
+## Compliance
+
+This library was last reviewed to comply with [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences:
+
+* In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases).
+
+**BREAKING CHANGES:**
+A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
+
+## Usage Tips
+
+### Signing vs Encryption
+
+A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
+
+* The author of the token was in possession of the signing secret
+* The data has not been modified since it was signed
+
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. The companion project https://github.com/golang-jwt/jwe aims to provide a (very) experimental implementation of the JWE standard.
+
+### Choosing a Signing Method
+
+There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
+
+Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly faster computationally, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
+
+Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
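+
+To make the symmetric case concrete, here is a short sketch that signs a token with HS256 and parses it back; the keyfunc rejects any unexpected signing method before handing over the key (the secret and claims values are hypothetical):
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/golang-jwt/jwt/v4"
+)
+
+func main() {
+	key := []byte("my-shared-secret") // hypothetical; use a strong secret in practice
+
+	// Sign a token with the symmetric HS256 method.
+	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.RegisteredClaims{
+		Subject:   "user-123",
+		ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
+	})
+	signed, err := token.SignedString(key)
+	if err != nil {
+		panic(err)
+	}
+
+	// Parse it back; the keyfunc verifies the alg before returning the key.
+	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
+		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
+			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
+		}
+		return key, nil
+	})
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("token valid:", parsed.Valid)
+}
+```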
+
+### Signing Methods and Key Types
+
+Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
+
+* The [HMAC signing methods](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation
+* The [RSA signing methods](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
+* The [ECDSA signing methods](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
+* The [EdDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodEd25519) (`Ed25519`) expects `ed25519.PrivateKey` for signing and `ed25519.PublicKey` for validation
+
+### JWT and OAuth
+
+It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
+
+Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
+
+* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
+* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
+* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
+
+### Troubleshooting
+
+This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types.
+
+## More
+
+Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt/v4).
+
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
+
+[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version of the JWT logo, which is distributed under the terms of the [MIT License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt).
diff --git a/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md
new file mode 100644
index 000000000..b08402c34
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+As of February 2022 (and until this document is updated), the latest version `v4` is supported.
+
+## Reporting a Vulnerability
+
+If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try to be explicit, and describe steps to reproduce the security issue with code example(s).
+
+You will receive a response in a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem.
+
+## Public Discussions
+
+Please avoid publicly discussing a potential security vulnerability.
+
+Let's take this offline and find a solution first; this limits the potential impact as much as possible.
+
+We appreciate your help!
diff --git a/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md
new file mode 100644
index 000000000..afbfc4e40
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md
@@ -0,0 +1,135 @@
+## `jwt-go` Version History
+
+#### 4.0.0
+
+* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`.
+
+#### 3.2.2
+
+* Starting from this release, we are adopting the policy of supporting the 2 most recent versions of Go currently available. At the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)).
+* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks to @thaJeztah for making us aware of that and to @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)).
+* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)).
+* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)).
+
+#### 3.2.1
+
+* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code
+  * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`
+* Fixed a type confusion issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160
+
+#### 3.2.0
+
+* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
+* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
+* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. The initial set includes `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
+* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
+
+#### 3.1.0
+
+* Improvements to `jwt` command line tool
+* Added `SkipClaimsValidation` option to `Parser`
+* Documentation updates
+
+#### 3.0.0
+
+* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
+  * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
+  * `ParseFromRequest` has been moved to the `request` subpackage and usage has changed
+  * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
+* Other Additions and Changes
+  * Added `Claims` interface type to allow users to decode the claims into a custom type
+  * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
+  * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
+  * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
+  * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
+  * Added several new, more specific, validation errors to the error type bitmask
+  * Moved examples from README to executable example files
+  * Signing method registry is now thread safe
+  * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
+
+#### 2.7.0
+
+This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
+
+* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
+* Error text for expired tokens includes how long it's been expired
+* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
+* Documentation updates
+
+#### 2.6.0
+
+* Exposed inner error within ValidationError
+* Fixed validation errors when using UseJSONNumber flag
+* Added several unit tests
+
+#### 2.5.0
+
+* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
+* Updated/fixed some documentation
+* Added more helpful error message when trying to parse tokens that begin with `BEARER `
+
+#### 2.4.0
+
+* Added new type, Parser, to allow for configuration of various parsing parameters
+  * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
+  * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+#### 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+#### 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
+
+#### 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+#### 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+  * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+  * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+  * `KeyFunc` now returns `interface{}` instead of `[]byte`
+  * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+  * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+  * Added public package global `SigningMethodHS256`
+  * Added public package global `SigningMethodHS384`
+  * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+  * Added public package global `SigningMethodRS256`
+  * Added public package global `SigningMethodRS384`
+  * Added public package global `SigningMethodRS512`
+* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+#### 1.0.2
+
+* Fixed bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in RS256 implementation. No functional changes
+
+#### 1.0.1
+
+* Fixed panic if RS256 signing method was passed an invalid key
+
+#### 1.0.0
+
+* First versioned release
+* API stabilized
+* Supports creating, signing, parsing, and validating JWT tokens
+* Supports RS256 and HS256 signing methods
diff --git a/vendor/github.com/golang-jwt/jwt/v4/claims.go b/vendor/github.com/golang-jwt/jwt/v4/claims.go
new file mode 100644
index 000000000..364cec877
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/claims.go
@@ -0,0 +1,269 @@
+package jwt
+
+import (
+	"crypto/subtle"
+	"fmt"
+	"time"
+)
+
+// Claims must just have a Valid method that determines
+// if the token is invalid for any supported reason
+type Claims interface {
+	Valid() error
+}
+
+// RegisteredClaims are a structured version of the JWT Claims Set,
+// restricted to Registered Claim Names, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1
+//
+// This type can be used on its own, but then additional private and
+// public claims embedded in the JWT will not be parsed. The typical use case
+// therefore is to embed this in a user-defined claim type.
+//
+// See examples for how to use this with your own claim types.
+type RegisteredClaims struct {
+	// the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1
+	Issuer string `json:"iss,omitempty"`
+
+	// the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2
+	Subject string `json:"sub,omitempty"`
+
+	// the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3
+	Audience ClaimStrings `json:"aud,omitempty"`
+
+	// the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4
+	ExpiresAt *NumericDate `json:"exp,omitempty"`
+
+	// the `nbf` (Not Before) claim.
See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5 + NotBefore *NumericDate `json:"nbf,omitempty"` + + // the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6 + IssuedAt *NumericDate `json:"iat,omitempty"` + + // the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7 + ID string `json:"jti,omitempty"` +} + +// Valid validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c RegisteredClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if !c.VerifyExpiresAt(now, false) { + delta := now.Sub(c.ExpiresAt.Time) + vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta) + vErr.Errors |= ValidationErrorExpired + } + + if !c.VerifyIssuedAt(now, false) { + vErr.Inner = ErrTokenUsedBeforeIssued + vErr.Errors |= ValidationErrorIssuedAt + } + + if !c.VerifyNotBefore(now, false) { + vErr.Inner = ErrTokenNotValidYet + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// VerifyAudience compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *RegisteredClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// VerifyExpiresAt compares the exp claim against cmp (cmp < exp). +// If req is false, it will return true, if exp is unset. +func (c *RegisteredClaims) VerifyExpiresAt(cmp time.Time, req bool) bool { + if c.ExpiresAt == nil { + return verifyExp(nil, cmp, req) + } + + return verifyExp(&c.ExpiresAt.Time, cmp, req) +} + +// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. +func (c *RegisteredClaims) VerifyIssuedAt(cmp time.Time, req bool) bool { + if c.IssuedAt == nil { + return verifyIat(nil, cmp, req) + } + + return verifyIat(&c.IssuedAt.Time, cmp, req) +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. +func (c *RegisteredClaims) VerifyNotBefore(cmp time.Time, req bool) bool { + if c.NotBefore == nil { + return verifyNbf(nil, cmp, req) + } + + return verifyNbf(&c.NotBefore.Time, cmp, req) +} + +// VerifyIssuer compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *RegisteredClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// StandardClaims are a structured version of the JWT Claims Set, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-4. They do not follow the +// specification exactly, since they were based on an earlier draft of the +// specification and not updated. The main difference is that they only +// support integer-based date fields and singular audiences. This might lead to +// incompatibilities with other JWT implementations. The use of this is discouraged, instead +// the newer RegisteredClaims struct should be used. +// +// Deprecated: Use RegisteredClaims instead for a forward-compatible way to access registered claims in a struct. 
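+//
+// As a sketch of what the migration looks like, a user-defined claims type
+// (MyClaims here is hypothetical) can embed RegisteredClaims instead:
+//
+//	type MyClaims struct {
+//		Scope string `json:"scope"`
+//		RegisteredClaims
+//	}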
+type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Valid validates time based claims "exp, iat, nbf". There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if !c.VerifyExpiresAt(now, false) { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta) + vErr.Errors |= ValidationErrorExpired + } + + if !c.VerifyIssuedAt(now, false) { + vErr.Inner = ErrTokenUsedBeforeIssued + vErr.Errors |= ValidationErrorIssuedAt + } + + if !c.VerifyNotBefore(now, false) { + vErr.Inner = ErrTokenNotValidYet + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// VerifyAudience compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud([]string{c.Audience}, cmp, req) +} + +// VerifyExpiresAt compares the exp claim against cmp (cmp < exp). +// If req is false, it will return true, if exp is unset. +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + if c.ExpiresAt == 0 { + return verifyExp(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.ExpiresAt, 0) + return verifyExp(&t, time.Unix(cmp, 0), req) +} + +// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + if c.IssuedAt == 0 { + return verifyIat(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.IssuedAt, 0) + return verifyIat(&t, time.Unix(cmp, 0), req) +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + if c.NotBefore == 0 { + return verifyNbf(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.NotBefore, 0) + return verifyNbf(&t, time.Unix(cmp, 0), req) +} + +// VerifyIssuer compares the iss claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// ----- helpers + +func verifyAud(aud []string, cmp string, required bool) bool { + if len(aud) == 0 { + return !required + } + // use a var here to keep constant time compare when looping over a number of claims + result := false + + var stringClaims string + for _, a := range aud { + if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { + result = true + } + stringClaims = stringClaims + a + } + + // case where "" is sent in one or many aud claims + if len(stringClaims) == 0 { + return !required + } + + return result +} + +func verifyExp(exp *time.Time, now time.Time, required bool) bool { + if exp == nil { + return !required + } + return now.Before(*exp) +} + +func verifyIat(iat *time.Time, now time.Time, required bool) bool { + if iat == nil { + return !required + } + return now.After(*iat) || now.Equal(*iat) +} + +func verifyNbf(nbf *time.Time, now time.Time, required bool) bool { + if nbf == nil { + return !required + } + return now.After(*nbf) || now.Equal(*nbf) +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + return subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/doc.go b/vendor/github.com/golang-jwt/jwt/v4/doc.go new file mode 100644 index 000000000..a86dc1a3b --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. +package jwt diff --git a/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go new file mode 100644 index 000000000..eac023fc6 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go @@ -0,0 +1,142 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// SigningMethodECDSA implements the ECDSA family of signing methods. +// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Verify implements token verification for the SigningMethod. 
+// For this verify method, key must be an ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKeyType + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus { + return nil + } + + return ErrECDSAVerification +} + +// Sign implements token signing for the SigningMethod. +// For this signing method, key must be an ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outputs (r and s) into big-endian byte arrays + // padded with zeros on the left to make sure the sizes work out. + // Output must be 2*keyBytes long. + out := make([]byte, 2*keyBytes) + r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output. + s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output. 
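+		// Note: out now holds r||s with each value left-padded to keyBytes;
+		// this fixed-width R||S layout is the JWS signature format for the
+		// ES* algorithms (see RFC 7518, section 3.4).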
+ + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go new file mode 100644 index 000000000..5700636d3 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go @@ -0,0 +1,69 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key") +) + +// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go new file mode 100644 index 000000000..07d3aacd6 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go @@ -0,0 +1,85 @@ +package jwt + +import ( + "errors" + + "crypto" + "crypto/ed25519" + "crypto/rand" +) + +var ( + ErrEd25519Verification = errors.New("ed25519: verification error") +) + +// SigningMethodEd25519 implements the EdDSA family. +// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification +type SigningMethodEd25519 struct{} + +// Specific instance for EdDSA +var ( + SigningMethodEdDSA *SigningMethodEd25519 +) + +func init() { + SigningMethodEdDSA = &SigningMethodEd25519{} + RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod { + return SigningMethodEdDSA + }) +} + +func (m *SigningMethodEd25519) Alg() string { + return "EdDSA" +} + +// Verify implements token verification for the SigningMethod. 
+// For this verify method, key must be an ed25519.PublicKey +func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error { + var err error + var ed25519Key ed25519.PublicKey + var ok bool + + if ed25519Key, ok = key.(ed25519.PublicKey); !ok { + return ErrInvalidKeyType + } + + if len(ed25519Key) != ed25519.PublicKeySize { + return ErrInvalidKey + } + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Verify the signature + if !ed25519.Verify(ed25519Key, []byte(signingString), sig) { + return ErrEd25519Verification + } + + return nil +} + +// Sign implements token signing for the SigningMethod. +// For this signing method, key must be an ed25519.PrivateKey +func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) { + var ed25519Key crypto.Signer + var ok bool + + if ed25519Key, ok = key.(crypto.Signer); !ok { + return "", ErrInvalidKeyType + } + + if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok { + return "", ErrInvalidKey + } + + // Sign the string and return the encoded result + // ed25519 performs a two-pass hash as part of its algorithm. Therefore, we need to pass a non-prehashed message into the Sign function, as indicated by crypto.Hash(0) + sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0)) + if err != nil { + return "", err + } + return EncodeSegment(sig), nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go new file mode 100644 index 000000000..cdb5e68e8 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go @@ -0,0 +1,64 @@ +package jwt + +import ( + "crypto" + "crypto/ed25519" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key") + ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key") +) + +// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key +func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey ed25519.PrivateKey + var ok bool + if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok { + return nil, ErrNotEdPrivateKey + } + + return pkey, nil +} + +// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key +func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + return nil, err + } + + var pkey ed25519.PublicKey + var ok bool + if pkey, ok = parsedKey.(ed25519.PublicKey); !ok { + return nil, ErrNotEdPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/errors.go b/vendor/github.com/golang-jwt/jwt/v4/errors.go new file mode 100644 index 000000000..10ac8835c --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/errors.go @@ -0,0 +1,112 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + 
ErrInvalidKeyType = errors.New("key is of invalid type")
+	ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+
+	ErrTokenMalformed        = errors.New("token is malformed")
+	ErrTokenUnverifiable     = errors.New("token is unverifiable")
+	ErrTokenSignatureInvalid = errors.New("token signature is invalid")
+
+	ErrTokenInvalidAudience  = errors.New("token has invalid audience")
+	ErrTokenExpired          = errors.New("token is expired")
+	ErrTokenUsedBeforeIssued = errors.New("token used before issued")
+	ErrTokenInvalidIssuer    = errors.New("token has invalid issuer")
+	ErrTokenNotValidYet      = errors.New("token is not valid yet")
+	ErrTokenInvalidId        = errors.New("token has invalid id")
+	ErrTokenInvalidClaims    = errors.New("token has invalid claims")
+)
+
+// The errors that might occur when parsing and validating a token
+const (
+	ValidationErrorMalformed        uint32 = 1 << iota // Token is malformed
+	ValidationErrorUnverifiable                        // Token could not be verified because of signing problems
+	ValidationErrorSignatureInvalid                    // Signature validation failed
+
+	// Standard Claim validation errors
+	ValidationErrorAudience      // AUD validation failed
+	ValidationErrorExpired       // EXP validation failed
+	ValidationErrorIssuedAt      // IAT validation failed
+	ValidationErrorIssuer        // ISS validation failed
+	ValidationErrorNotValidYet   // NBF validation failed
+	ValidationErrorId            // JTI validation failed
+	ValidationErrorClaimsInvalid // Generic claims validation error
+)
+
+// NewValidationError is a helper for constructing a ValidationError with a string error message
+func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
+	return &ValidationError{
+		text:   errorText,
+		Errors: errorFlags,
+	}
+}
+
+// ValidationError represents an error from Parse if token is not valid
+type ValidationError struct {
+	Inner  error  // stores the error returned by external dependencies, i.e.: KeyFunc
+	Errors uint32 // bitfield. see ValidationError... constants
+	text   string // errors that do not have a valid error just have text
+}
+
+// Error is the implementation of the error interface.
+func (e ValidationError) Error() string {
+	if e.Inner != nil {
+		return e.Inner.Error()
+	} else if e.text != "" {
+		return e.text
+	} else {
+		return "token is invalid"
+	}
+}
+
+// Unwrap gives errors.Is and errors.As access to the inner error.
+func (e *ValidationError) Unwrap() error {
+	return e.Inner
+}
+
+// No errors
+func (e *ValidationError) valid() bool {
+	return e.Errors == 0
+}
+
+// Is checks if this ValidationError is of the supplied error. We first check for a direct match
+// on the inner error; if that fails, we compare using the error flags. This way we can use
+// custom error messages (mainly for backwards compatibility) and still leverage errors.Is using the global error variables.
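+//
+// For example, a sketch of typical caller code:
+//
+//	_, err := jwt.Parse(tokenString, keyFunc)
+//	if errors.Is(err, jwt.ErrTokenExpired) {
+//		// handle expiry, e.g. by prompting for re-authentication
+//	}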
+func (e *ValidationError) Is(err error) bool { + // Check, if our inner error is a direct match + if errors.Is(errors.Unwrap(e), err) { + return true + } + + // Otherwise, we need to match using our error flags + switch err { + case ErrTokenMalformed: + return e.Errors&ValidationErrorMalformed != 0 + case ErrTokenUnverifiable: + return e.Errors&ValidationErrorUnverifiable != 0 + case ErrTokenSignatureInvalid: + return e.Errors&ValidationErrorSignatureInvalid != 0 + case ErrTokenInvalidAudience: + return e.Errors&ValidationErrorAudience != 0 + case ErrTokenExpired: + return e.Errors&ValidationErrorExpired != 0 + case ErrTokenUsedBeforeIssued: + return e.Errors&ValidationErrorIssuedAt != 0 + case ErrTokenInvalidIssuer: + return e.Errors&ValidationErrorIssuer != 0 + case ErrTokenNotValidYet: + return e.Errors&ValidationErrorNotValidYet != 0 + case ErrTokenInvalidId: + return e.Errors&ValidationErrorId != 0 + case ErrTokenInvalidClaims: + return e.Errors&ValidationErrorClaimsInvalid != 0 + } + + return false +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/hmac.go b/vendor/github.com/golang-jwt/jwt/v4/hmac.go new file mode 100644 index 000000000..011f68a27 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/hmac.go @@ -0,0 +1,95 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// SigningMethodHMAC implements the HMAC-SHA family of signing methods. +// Expects key type of []byte for both signing and validation +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify implements token verification for the SigningMethod. Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Sign implements token signing for the SigningMethod. 
+// Key must be []byte
+func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
+	if keyBytes, ok := key.([]byte); ok {
+		if !m.Hash.Available() {
+			return "", ErrHashUnavailable
+		}
+
+		hasher := hmac.New(m.Hash.New, keyBytes)
+		hasher.Write([]byte(signingString))
+
+		return EncodeSegment(hasher.Sum(nil)), nil
+	}
+
+	return "", ErrInvalidKeyType
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/map_claims.go b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go
new file mode 100644
index 000000000..2700d64a0
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go
@@ -0,0 +1,151 @@
+package jwt
+
+import (
+	"encoding/json"
+	"errors"
+	"time"
+)
+
+// MapClaims is a claims type that uses map[string]interface{} for JSON decoding.
+// This is the default claims type if you don't supply one
+type MapClaims map[string]interface{}
+
+// VerifyAudience compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
+	var aud []string
+	switch v := m["aud"].(type) {
+	case string:
+		aud = append(aud, v)
+	case []string:
+		aud = v
+	case []interface{}:
+		for _, a := range v {
+			vs, ok := a.(string)
+			if !ok {
+				return false
+			}
+			aud = append(aud, vs)
+		}
+	}
+	return verifyAud(aud, cmp, req)
+}
+
+// VerifyExpiresAt compares the exp claim against cmp (cmp < exp).
+// If req is false, it will return true if exp is unset.
+func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+	cmpTime := time.Unix(cmp, 0)
+
+	v, ok := m["exp"]
+	if !ok {
+		return !req
+	}
+
+	switch exp := v.(type) {
+	case float64:
+		if exp == 0 {
+			return verifyExp(nil, cmpTime, req)
+		}
+
+		return verifyExp(&newNumericDateFromSeconds(exp).Time, cmpTime, req)
+	case json.Number:
+		v, _ := exp.Float64()
+
+		return verifyExp(&newNumericDateFromSeconds(v).Time, cmpTime, req)
+	}
+
+	return false
+}
+
+// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat).
+// If req is false, it will return true if iat is unset.
+func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+	cmpTime := time.Unix(cmp, 0)
+
+	v, ok := m["iat"]
+	if !ok {
+		return !req
+	}
+
+	switch iat := v.(type) {
+	case float64:
+		if iat == 0 {
+			return verifyIat(nil, cmpTime, req)
+		}
+
+		return verifyIat(&newNumericDateFromSeconds(iat).Time, cmpTime, req)
+	case json.Number:
+		v, _ := iat.Float64()
+
+		return verifyIat(&newNumericDateFromSeconds(v).Time, cmpTime, req)
+	}
+
+	return false
+}
+
+// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf).
+// If req is false, it will return true if nbf is unset.
+func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
+	cmpTime := time.Unix(cmp, 0)
+
+	v, ok := m["nbf"]
+	if !ok {
+		return !req
+	}
+
+	switch nbf := v.(type) {
+	case float64:
+		if nbf == 0 {
+			return verifyNbf(nil, cmpTime, req)
+		}
+
+		return verifyNbf(&newNumericDateFromSeconds(nbf).Time, cmpTime, req)
+	case json.Number:
+		v, _ := nbf.Float64()
+
+		return verifyNbf(&newNumericDateFromSeconds(v).Time, cmpTime, req)
+	}
+
+	return false
+}
+
+// VerifyIssuer compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
+	iss, _ := m["iss"].(string)
+	return verifyIss(iss, cmp, req)
+}
+
+// Valid validates time-based claims "exp, iat, nbf".
+// There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if !m.VerifyExpiresAt(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenExpired + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if !m.VerifyIssuedAt(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenUsedBeforeIssued + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if !m.VerifyNotBefore(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenNotValidYet + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/none.go b/vendor/github.com/golang-jwt/jwt/v4/none.go new file mode 100644 index 000000000..f19835d20 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/none.go @@ -0,0 +1,52 @@ +package jwt + +// SigningMethodNone implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go new file mode 100644 index 000000000..c0a6f6927 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go @@ -0,0 +1,177 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + // If populated, only these methods will be considered valid. + // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. + ValidMethods []string + + // Use JSON Number format in JSON decoder. 
+ // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. + UseJSONNumber bool + + // Skip claims validation during token parsing. + // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. + SkipClaimsValidation bool +} + +// NewParser creates a new Parser with the specified options +func NewParser(options ...ParserOption) *Parser { + p := &Parser{} + + // loop through our parsing options and apply them + for _, option := range options { + option(p) + } + + return p +} + +// Parse parses, validates, verifies the signature and returns the parsed token. +// keyFunc will receive the parsed token and should return the key for validating. +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims +// interface. This provides default values which can be overridden and allows a caller to use their own type, rather +// than the default MapClaims implementation of Claims. +// +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), +// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the +// proper memory for it before passing in the overall claims, otherwise you might run into a panic. +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + token, parts, err := p.ParseUnverified(tokenString, claims) + if err != nil { + return token, err + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} + +// ParseUnverified parses the token but doesn't validate the signature. 
+// +// WARNING: Don't use this method unless you know what you're doing. +// +// It's only ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from it. +func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser_option.go b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go new file mode 100644 index 000000000..6ea6f9527 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go @@ -0,0 +1,29 @@ +package jwt + +// ParserOption is used to implement functional-style options that modify the behavior of the parser. To add +// new options, just create a function (ideally beginning with With or Without) that returns an anonymous function that +// takes a *Parser type as input and manipulates its configuration accordingly. +type ParserOption func(*Parser) + +// WithValidMethods is an option to supply algorithm methods that the parser will check. Only those methods will be considered valid. +// It is heavily encouraged to use this option in order to prevent attacks such as https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/. +func WithValidMethods(methods []string) ParserOption { + return func(p *Parser) { + p.ValidMethods = methods + } +} + +// WithJSONNumber is an option to configure the underlying JSON parser with UseNumber +func WithJSONNumber() ParserOption { + return func(p *Parser) { + p.UseJSONNumber = true + } +} + +// WithoutClaimsValidation is an option to disable claims validation. This option should only be used if you exactly know +// what you are doing. 
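+//
+// A minimal usage sketch (illustrative only; the tokenString and the
+// []byte("secret") key are assumptions, not part of this package):
+//
+//	p := jwt.NewParser(jwt.WithoutClaimsValidation())
+//	tok, err := p.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
+//		return []byte("secret"), nil // the signature is still verified
+//	})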
+func WithoutClaimsValidation() ParserOption {
+	return func(p *Parser) {
+		p.SkipClaimsValidation = true
+	}
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa.go b/vendor/github.com/golang-jwt/jwt/v4/rsa.go
new file mode 100644
index 000000000..b910b19c0
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/rsa.go
@@ -0,0 +1,101 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+)
+
+// SigningMethodRSA implements the RSA family of signing methods.
+// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+type SigningMethodRSA struct {
+	Name string
+	Hash crypto.Hash
+}
+
+// Specific instances for RS256 and company
+var (
+	SigningMethodRS256 *SigningMethodRSA
+	SigningMethodRS384 *SigningMethodRSA
+	SigningMethodRS512 *SigningMethodRSA
+)
+
+func init() {
+	// RS256
+	SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+	RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+		return SigningMethodRS256
+	})
+
+	// RS384
+	SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+	RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+		return SigningMethodRS384
+	})
+
+	// RS512
+	SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+	RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+		return SigningMethodRS512
+	})
+}
+
+func (m *SigningMethodRSA) Alg() string {
+	return m.Name
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this signing method, key must be an *rsa.PublicKey structure.
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	var rsaKey *rsa.PublicKey
+	var ok bool
+
+	if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+		return ErrInvalidKeyType
+	}
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Verify the signature
+	return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an *rsa.PrivateKey structure.
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
+	var rsaKey *rsa.PrivateKey
+	var ok bool
+
+	// Validate type of key
+	if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+		return "", ErrInvalidKey
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return the encoded bytes
+	if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
+		return EncodeSegment(sigBytes), nil
+	} else {
+		return "", err
+	}
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
new file mode 100644
index 000000000..4fd6f9e61
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
@@ -0,0 +1,143 @@
+//go:build go1.4
+// +build go1.4
+
+package jwt
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+)
+
+// SigningMethodRSAPSS implements the RSAPSS family of signing methods.
+type SigningMethodRSAPSS struct {
+	*SigningMethodRSA
+	Options *rsa.PSSOptions
+	// VerifyOptions is optional. If set, it overrides Options for rsa.VerifyPSS.
+	// Used to accept tokens signed with rsa.PSSSaltLengthAuto, which doesn't follow
+	// https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously.
+	// See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details.
+	VerifyOptions *rsa.PSSOptions
+}
+
+// Specific instances for RS/PS and company.
+var (
+	SigningMethodPS256 *SigningMethodRSAPSS
+	SigningMethodPS384 *SigningMethodRSAPSS
+	SigningMethodPS512 *SigningMethodRSAPSS
+)
+
+func init() {
+	// PS256
+	SigningMethodPS256 = &SigningMethodRSAPSS{
+		SigningMethodRSA: &SigningMethodRSA{
+			Name: "PS256",
+			Hash: crypto.SHA256,
+		},
+		Options: &rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthEqualsHash,
+		},
+		VerifyOptions: &rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+		return SigningMethodPS256
+	})
+
+	// PS384
+	SigningMethodPS384 = &SigningMethodRSAPSS{
+		SigningMethodRSA: &SigningMethodRSA{
+			Name: "PS384",
+			Hash: crypto.SHA384,
+		},
+		Options: &rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthEqualsHash,
+		},
+		VerifyOptions: &rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+		return SigningMethodPS384
+	})
+
+	// PS512
+	SigningMethodPS512 = &SigningMethodRSAPSS{
+		SigningMethodRSA: &SigningMethodRSA{
+			Name: "PS512",
+			Hash: crypto.SHA512,
+		},
+		Options: &rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthEqualsHash,
+		},
+		VerifyOptions: &rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+		return SigningMethodPS512
+	})
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an *rsa.PublicKey struct
+func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	var rsaKey *rsa.PublicKey
+	switch k := key.(type) {
+	case *rsa.PublicKey:
+		rsaKey = k
+	default:
+		return ErrInvalidKey
+	}
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	opts := m.Options
+	if m.VerifyOptions != nil {
+		opts = m.VerifyOptions
+	}
+
+	return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts)
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go new file mode 100644 index 000000000..1966c450b --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go @@ -0,0 +1,105 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key") + ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key") +) + +// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password +// +// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock +// function, which was deprecated since RFC 1423 is regarded insecure by design. Unfortunately, there is no alternative +// in the Go standard library for now. See https://github.com/golang/go/issues/8860. 
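+//
+// For keys that are not password protected, an illustrative sketch of the
+// non-deprecated flow (pemBytes here is an assumption):
+//
+//	pk, err := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
+//	if err != nil {
+//		// handle the parse error
+//	}
+//	signed, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{"sub": "1"}).SignedString(pk)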
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	var parsedKey interface{}
+
+	var blockDecrypted []byte
+	if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+		return nil, err
+	}
+
+	if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+		if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+			return nil, err
+		}
+	}
+
+	var pkey *rsa.PrivateKey
+	var ok bool
+	if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+		return nil, ErrNotRSAPrivateKey
+	}
+
+	return pkey, nil
+}
+
+// ParseRSAPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key
+func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+		if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+			parsedKey = cert.PublicKey
+		} else {
+			return nil, err
+		}
+	}
+
+	var pkey *rsa.PublicKey
+	var ok bool
+	if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
+		return nil, ErrNotRSAPublicKey
+	}
+
+	return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/signing_method.go b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go
new file mode 100644
index 000000000..241ae9c60
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go
@@ -0,0 +1,46 @@
+package jwt
+
+import (
+	"sync"
+)
+
+var signingMethods = map[string]func() SigningMethod{}
+var signingMethodLock = new(sync.RWMutex)
+
+// SigningMethod can be used to add new methods for signing or verifying tokens.
+type SigningMethod interface {
+	Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
+	Sign(signingString string, key interface{}) (string, error)    // Returns encoded signature or error
+	Alg() string                                                   // returns the alg identifier for this method (example: 'HS256')
+}
+
+// RegisterSigningMethod registers the "alg" name and a factory function for a signing method.
+// This is typically done during init() in the method's implementation
+func RegisterSigningMethod(alg string, f func() SigningMethod) {
+	signingMethodLock.Lock()
+	defer signingMethodLock.Unlock()
+
+	signingMethods[alg] = f
+}
+
+// GetSigningMethod retrieves a signing method from an "alg" string
+func GetSigningMethod(alg string) (method SigningMethod) {
+	signingMethodLock.RLock()
+	defer signingMethodLock.RUnlock()
+
+	if methodF, ok := signingMethods[alg]; ok {
+		method = methodF()
+	}
+	return
+}
+
+// GetAlgorithms returns a list of registered "alg" names
+func GetAlgorithms() (algs []string) {
+	signingMethodLock.RLock()
+	defer signingMethodLock.RUnlock()
+
+	for alg := range signingMethods {
+		algs = append(algs, alg)
+	}
+	return
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf
new file mode 100644
index 000000000..53745d51d
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf
@@ -0,0 +1 @@
+checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"]
diff --git a/vendor/github.com/golang-jwt/jwt/v4/token.go b/vendor/github.com/golang-jwt/jwt/v4/token.go
new file mode 100644
index 000000000..786b275ce
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/token.go
@@ -0,0 +1,143 @@
+package jwt
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"strings"
+	"time"
+)
+
+// DecodePaddingAllowed will switch the codec used for decoding JWTs. Note that the JWS RFC7515
+// states that the tokens will utilize a Base64url encoding with no padding. Unfortunately, some implementations
+// of JWT are producing non-standard tokens, and thus require support for decoding. Note that this is a global
+// variable, and updating it will change the behavior on a package level, and is also NOT go-routine safe.
+// To use the non-recommended decoding, set this boolean to `true` prior to using this package.
+var DecodePaddingAllowed bool
+
+// DecodeStrict will switch the codec used for decoding JWTs into strict mode.
+// In this mode, the decoder requires that trailing padding bits are zero, as described in RFC 4648 section 3.5.
+// Note that this is a global variable, and updating it will change the behavior on a package level, and is also NOT go-routine safe.
+// To use strict decoding, set this boolean to `true` prior to using this package.
+var DecodeStrict bool
+
+// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
+// You can override it to use another time value. This is useful for testing or if your
+// server uses a different time zone than your tokens.
+var TimeFunc = time.Now
+
+// Keyfunc will be used by the Parse methods as a callback function to supply
+// the key for verification. The function receives the parsed,
+// but unverified Token. This allows you to use properties in the
+// Header of the token (such as `kid`) to identify which key to use.
+type Keyfunc func(*Token) (interface{}, error)
+
+// Token represents a JWT Token. Different fields will be used depending on whether you're
+// creating or parsing/verifying a token.
+type Token struct {
+	Raw       string                 // The raw token. Populated when you Parse a token
+	Method    SigningMethod          // The signing method used or to be used
+	Header    map[string]interface{} // The first segment of the token
+	Claims    Claims                 // The second segment of the token
+	Signature string                 // The third segment of the token. Populated when you Parse a token
+	Valid     bool                   // Is the token valid?
Populated when you Parse/Verify a token +} + +// New creates a new Token with the specified signing method and an empty map of claims. +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +// NewWithClaims creates a new Token with the specified signing method and claims. +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// SignedString creates and returns a complete, signed JWT. +// The token is signed using the SigningMethod specified in the token. +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// SigningString generates the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + var jsonValue []byte + + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + header := EncodeSegment(jsonValue) + + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + claim := EncodeSegment(jsonValue) + + return strings.Join([]string{header, claim}, "."), nil +} + +// Parse parses, validates, verifies the signature and returns the parsed token. +// keyFunc will receive the parsed token and should return the cryptographic key +// for verifying the signature. +// The caller is strongly encouraged to set the WithValidMethods option to +// validate the 'alg' claim in the token matches the expected algorithm. +// For more details about the importance of validating the 'alg' claim, +// see https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/ +func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { + return NewParser(options...).Parse(tokenString, keyFunc) +} + +// ParseWithClaims is a shortcut for NewParser().ParseWithClaims(). +// +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), +// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the +// proper memory for it before passing in the overall claims, otherwise you might run into a panic. 
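+//
+// A short sketch with a custom claims type (MyClaims and keyFunc are
+// illustrative assumptions, not part of this package):
+//
+//	type MyClaims struct {
+//		Scope string `json:"scope"`
+//		jwt.RegisteredClaims
+//	}
+//
+//	claims := &MyClaims{}
+//	tok, err := jwt.ParseWithClaims(tokenString, claims, keyFunc)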
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+	return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc)
+}
+
+// EncodeSegment encodes a segment using the JWT-specific base64url encoding with padding stripped
+//
+// Deprecated: In a future release, we will demote this function to a non-exported function, since it
+// should only be used internally
+func EncodeSegment(seg []byte) string {
+	return base64.RawURLEncoding.EncodeToString(seg)
+}
+
+// DecodeSegment decodes a segment using the JWT-specific base64url encoding with padding stripped
+//
+// Deprecated: In a future release, we will demote this function to a non-exported function, since it
+// should only be used internally
+func DecodeSegment(seg string) ([]byte, error) {
+	encoding := base64.RawURLEncoding
+
+	if DecodePaddingAllowed {
+		if l := len(seg) % 4; l > 0 {
+			seg += strings.Repeat("=", 4-l)
+		}
+		encoding = base64.URLEncoding
+	}
+
+	if DecodeStrict {
+		encoding = encoding.Strict()
+	}
+	return encoding.DecodeString(seg)
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v4/types.go b/vendor/github.com/golang-jwt/jwt/v4/types.go
new file mode 100644
index 000000000..ac8e140eb
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v4/types.go
@@ -0,0 +1,145 @@
+package jwt
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+// TimePrecision sets the precision of times and dates within this library.
+// This has an influence on the precision of times when comparing expiry or
+// other related time fields. Furthermore, it is also the precision of times
+// when serializing.
+//
+// For backwards compatibility the default precision is set to seconds, so that
+// no fractional timestamps are generated.
+var TimePrecision = time.Second
+
+// MarshalSingleStringAsArray modifies the behaviour of the ClaimStrings type, especially
+// its MarshalJSON function.
+//
+// If it is set to true (the default), it will always serialize the type as an
+// array of strings, even if it just contains one element, defaulting to the behaviour
+// of the underlying []string. If it is set to false, it will serialize to a single
+// string, if it contains one element. Otherwise, it will serialize to an array of strings.
+var MarshalSingleStringAsArray = true
+
+// NumericDate represents a JSON numeric date value, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-2.
+type NumericDate struct {
+	time.Time
+}
+
+// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct.
+// It will truncate the timestamp according to the precision specified in TimePrecision.
+func NewNumericDate(t time.Time) *NumericDate {
+	return &NumericDate{t.Truncate(TimePrecision)}
+}
+
+// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a
+// UNIX epoch with the float fraction representing non-integer seconds.
+func newNumericDateFromSeconds(f float64) *NumericDate {
+	round, frac := math.Modf(f)
+	return NewNumericDate(time.Unix(int64(round), int64(frac*1e9)))
+}
+
+// MarshalJSON is an implementation of the json.Marshaler interface and serializes the UNIX epoch
+// represented in NumericDate to a byte array, using the precision specified in TimePrecision.
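+//
+// For example (an illustrative sketch): with the default TimePrecision of
+// time.Second, a NumericDate marshals as 1516239022; with
+// TimePrecision = time.Millisecond it would marshal as 1516239022.000.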
+func (date NumericDate) MarshalJSON() (b []byte, err error) {
+	var prec int
+	if TimePrecision < time.Second {
+		prec = int(math.Log10(float64(time.Second) / float64(TimePrecision)))
+	}
+	truncatedDate := date.Truncate(TimePrecision)
+
+	// For very large timestamps, UnixNano would overflow an int64, but this
+	// function requires nanosecond level precision, so we have to use the
+	// following technique to get round the issue:
+	// 1. Take the normal unix timestamp to form the whole number part of the
+	//    output,
+	// 2. Take the result of the Nanosecond function, which returns the offset
+	//    within the second of the particular unix time instance, to form the
+	//    decimal part of the output
+	// 3. Concatenate them to produce the final result
+	seconds := strconv.FormatInt(truncatedDate.Unix(), 10)
+	nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64)
+
+	output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...)
+
+	return output, nil
+}
+
+// UnmarshalJSON is an implementation of the json.Unmarshaler interface and deserializes a
+// NumericDate from a JSON representation, i.e. a json.Number. This number represents a UNIX epoch
+// with either integer or non-integer seconds.
+func (date *NumericDate) UnmarshalJSON(b []byte) (err error) {
+	var (
+		number json.Number
+		f      float64
+	)
+
+	if err = json.Unmarshal(b, &number); err != nil {
+		return fmt.Errorf("could not parse NumericDate: %w", err)
+	}
+
+	if f, err = number.Float64(); err != nil {
+		return fmt.Errorf("could not convert json number value to float: %w", err)
+	}
+
+	n := newNumericDateFromSeconds(f)
+	*date = *n
+
+	return nil
+}
+
+// ClaimStrings is basically just a slice of strings, but it can be deserialized from either a
+// string array or a single string.
+// This type is necessary, since the "aud" claim can either be a single string or an array.
+type ClaimStrings []string
+
+func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) {
+	var value interface{}
+
+	if err = json.Unmarshal(data, &value); err != nil {
+		return err
+	}
+
+	var aud []string
+
+	switch v := value.(type) {
+	case string:
+		aud = append(aud, v)
+	case []string:
+		aud = ClaimStrings(v)
+	case []interface{}:
+		for _, vv := range v {
+			vs, ok := vv.(string)
+			if !ok {
+				return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)}
+			}
+			aud = append(aud, vs)
+		}
+	case nil:
+		return nil
+	default:
+		return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)}
+	}
+
+	*s = aud
+
+	return
+}
+
+func (s ClaimStrings) MarshalJSON() (b []byte, err error) {
+	// This handles a special case in the JWT RFC. If the string array, e.g. used by the "aud" field,
+	// only contains one element, it MAY be serialized as a single string. This may or may not be
+	// desired based on the ecosystem of the other JWT libraries used, so we make it configurable by the
+	// variable MarshalSingleStringAsArray.
+	if len(s) == 1 && !MarshalSingleStringAsArray {
+		return json.Marshal(s[0])
+	}
+
+	return json.Marshal([]string(s))
+}
diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE
new file mode 100644
index 000000000..c33dcc7c9
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+   means each individual or legal entity that creates, contributes to the
+   creation of, or owns Covered Software.
+
+1.2.
“Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. 
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md new file mode 100644 index 000000000..444df08f8 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/README.md @@ -0,0 +1,89 @@ +# errwrap + +`errwrap` is a package for Go that formalizes the pattern of wrapping errors +and checking if an error contains another error. + +There is a common pattern in Go of taking a returned `error` value and +then wrapping it (such as with `fmt.Errorf`) before returning it. The problem +with this pattern is that you completely lose the original `error` structure. + +Arguably the _correct_ approach is that you should make a custom structure +implementing the `error` interface, and have the original error as a field +on that structure, such [as this example](http://golang.org/pkg/os/#PathError). +This is a good approach, but you have to know the entire chain of possible +rewrapping that happens, when you might just care about one. + +`errwrap` formalizes this pattern (it doesn't matter what approach you use +above) by giving a single interface for wrapping errors, checking if a specific +error is wrapped, and extracting that error. + +## Installation and Docs + +Install using `go get github.com/hashicorp/errwrap`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/errwrap + +## Usage + +#### Basic Usage + +Below is a very basic example of its usage: + +```go +// A function that always returns an error, but wraps it, like a real +// function might. +func tryOpen() error { + _, err := os.Open("/i/dont/exist") + if err != nil { + return errwrap.Wrapf("Doesn't exist: {{err}}", err) + } + + return nil +} + +func main() { + err := tryOpen() + + // We can use the Contains helpers to check if an error contains + // another error. It is safe to do this with a nil error, or with + // an error that doesn't even use the errwrap package. + if errwrap.Contains(err, "does not exist") { + // Do something + } + if errwrap.ContainsType(err, new(os.PathError)) { + // Do something + } + + // Or we can use the associated `Get` functions to just extract + // a specific error. 
This would return nil if that specific error doesn't
+	// exist.
+	perr := errwrap.GetType(err, new(os.PathError))
+}
+```
+
+#### Custom Types
+
+If you're already making custom types that properly wrap errors, then
+you can get all the functionality of `errwrap.Contains` and such by
+implementing the `Wrapper` interface with just one function. Example:
+
+```go
+type AppError struct {
+	Code ErrorCode
+	Err  error
+}
+
+func (e *AppError) WrappedErrors() []error {
+	return []error{e.Err}
+}
+```
+
+Now this works:
+
+```go
+err := &AppError{Err: fmt.Errorf("an error")}
+if errwrap.ContainsType(err, fmt.Errorf("")) {
+	// This will work!
+}
+```
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
new file mode 100644
index 000000000..44e368e56
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/errwrap.go
@@ -0,0 +1,178 @@
+// Package errwrap implements methods to formalize error wrapping in Go.
+//
+// All of the top-level functions that take an `error` are built to be able
+// to take any error, not just wrapped errors. This allows you to use errwrap
+// without having to type-check and type-cast everywhere.
+package errwrap
+
+import (
+	"errors"
+	"reflect"
+	"strings"
+)
+
+// WalkFunc is the callback called for Walk.
+type WalkFunc func(error)
+
+// Wrapper is an interface that can be implemented by custom types to
+// have all the Contains, Get, etc. functions in errwrap work.
+//
+// When Walk reaches a Wrapper, it will call the callback for every
+// wrapped error in addition to the wrapper itself. Since all the top-level
+// functions in errwrap use Walk, this means that all those functions work
+// with your custom type.
+type Wrapper interface {
+	WrappedErrors() []error
+}
+
+// Wrap defines that outer wraps inner, returning an error type that
+// can be cleanly used with the other methods in this package, such as
+// Contains, GetAll, etc.
+//
+// This function won't modify the error message at all (the outer message
+// will be used).
+func Wrap(outer, inner error) error {
+	return &wrappedError{
+		Outer: outer,
+		Inner: inner,
+	}
+}
+
+// Wrapf wraps an error with a formatting message. This is similar to using
+// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
+// errors, you should replace it with this.
+//
+// format is the format of the error message. The string '{{err}}' will
+// be replaced with the original error message.
+//
+// Deprecated: Use fmt.Errorf()
+func Wrapf(format string, err error) error {
+	outerMsg := ""
+	if err != nil {
+		outerMsg = err.Error()
+	}
+
+	outer := errors.New(strings.Replace(
+		format, "{{err}}", outerMsg, -1))
+
+	return Wrap(outer, err)
+}
+
+// Contains checks if the given error contains an error with the
+// message msg. If err is not a wrapped error, this will always return
+// false unless the error itself happens to match this msg.
+func Contains(err error, msg string) bool {
+	return len(GetAll(err, msg)) > 0
+}
+
+// ContainsType checks if the given error contains an error with
+// the same concrete type as v. If err is not a wrapped error, this will
+// check the err itself.
+func ContainsType(err error, v interface{}) bool {
+	return len(GetAllType(err, v)) > 0
+}
+
+// Get is the same as GetAll but returns the deepest matching error.
+func Get(err error, msg string) error {
+	es := GetAll(err, msg)
+	if len(es) > 0 {
+		return es[len(es)-1]
+	}
+
+	return nil
+}
+
+// GetType is the same as GetAllType but returns the deepest matching error.
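+//
+// A small sketch (illustrative only; the wrapped error comes from os.Open):
+//
+//	_, oerr := os.Open("/does/not/exist")
+//	err := errwrap.Wrapf("open failed: {{err}}", oerr)
+//	if pe := errwrap.GetType(err, new(os.PathError)); pe != nil {
+//		// pe is the deepest wrapped *os.PathError
+//	}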
+func GetType(err error, v interface{}) error { + es := GetAllType(err, v) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetAll gets all the errors that might be wrapped in err with the +// given message. The order of the errors is such that the outermost +// matching error (the most recent wrap) is index zero, and so on. +func GetAll(err error, msg string) []error { + var result []error + + Walk(err, func(err error) { + if err.Error() == msg { + result = append(result, err) + } + }) + + return result +} + +// GetAllType gets all the errors that are the same type as v. +// +// The order of the return value is the same as described in GetAll. +func GetAllType(err error, v interface{}) []error { + var result []error + + var search string + if v != nil { + search = reflect.TypeOf(v).String() + } + Walk(err, func(err error) { + var needle string + if err != nil { + needle = reflect.TypeOf(err).String() + } + + if needle == search { + result = append(result, err) + } + }) + + return result +} + +// Walk walks all the wrapped errors in err and calls the callback. If +// err isn't a wrapped error, this will be called once for err. If err +// is a wrapped error, the callback will be called for both the wrapper +// that implements error as well as the wrapped error itself. +func Walk(err error, cb WalkFunc) { + if err == nil { + return + } + + switch e := err.(type) { + case *wrappedError: + cb(e.Outer) + Walk(e.Inner, cb) + case Wrapper: + cb(err) + + for _, err := range e.WrappedErrors() { + Walk(err, cb) + } + case interface{ Unwrap() error }: + cb(err) + Walk(e.Unwrap(), cb) + default: + cb(err) + } +} + +// wrappedError is an implementation of error that has both the +// outer and inner errors. +type wrappedError struct { + Outer error + Inner error +} + +func (w *wrappedError) Error() string { + return w.Outer.Error() +} + +func (w *wrappedError) WrappedErrors() []error { + return []error{w.Outer, w.Inner} +} + +func (w *wrappedError) Unwrap() error { + return w.Inner +} diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE new file mode 100644 index 000000000..82b4de97c --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. 
+ +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). 
+ +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. 
You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile new file mode 100644 index 000000000..b97cd6ed0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/Makefile @@ -0,0 +1,31 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code. +test: generate + @echo "==> Running tests..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} + +# testrace runs the race checker +testrace: generate + @echo "==> Running tests (race)..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -race ${TESTARGS} + +# updatedeps installs all the dependencies needed to run and build. +updatedeps: + @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" + +# generate runs `go generate` to build the dynamically generated source files. +generate: + @echo "==> Generating..." + @find . -type f -name '.DS_Store' -delete + @go list ./... \ + | grep -v "/vendor/" \ + | xargs -n1 go generate + +.PHONY: default test testrace updatedeps generate diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md new file mode 100644 index 000000000..71dd308ed --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -0,0 +1,150 @@ +# go-multierror + +[![CircleCI](https://img.shields.io/circleci/build/github/hashicorp/go-multierror/master)](https://circleci.com/gh/hashicorp/go-multierror) +[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-multierror.svg)](https://pkg.go.dev/github.com/hashicorp/go-multierror) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/hashicorp/go-multierror) + +[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror +[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror + +`go-multierror` is a package for Go that provides a mechanism for +representing a list of `error` values as a single `error`. + +This allows a function in Go to return an `error` that might actually +be a list of errors. If the caller knows this, they can unwrap the +list and access the errors. If the caller doesn't know, the error +formats to a nice human-readable format. + +`go-multierror` is fully compatible with the Go standard library +[errors](https://golang.org/pkg/errors/) package, including the +functions `As`, `Is`, and `Unwrap`. This provides a standardized approach +for introspecting on error values. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-multierror`. + +Full documentation is available at +https://pkg.go.dev/github.com/hashicorp/go-multierror + +### Requires go version 1.13 or newer + +`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced +[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which +this library takes advantage of. + +If you need to use an earlier version of go, you can use the +[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) +tag, which doesn't rely on features in go 1.13. 
+ +If you see compile errors that look like the below, it's likely that +you're on an older version of go: + +``` +/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As +/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is +``` + +## Usage + +go-multierror is easy to use and purposely built to be unobtrusive in +existing Go applications/libraries that may not be aware of it. + +**Building a list of errors** + +The `Append` function is used to create a list of errors. This function +behaves a lot like the Go built-in `append` function: it doesn't matter +if the first argument is nil, a `multierror.Error`, or any other `error`, +the function behaves as you would expect. + +```go +var result error + +if err := step1(); err != nil { + result = multierror.Append(result, err) +} +if err := step2(); err != nil { + result = multierror.Append(result, err) +} + +return result +``` + +**Customizing the formatting of the errors** + +By specifying a custom `ErrorFormat`, you can customize the format +of the `Error() string` function: + +```go +var result *multierror.Error + +// ... accumulate errors here, maybe using Append + +if result != nil { + result.ErrorFormat = func([]error) string { + return "errors!" + } +} +``` + +**Accessing the list of errors** + +`multierror.Error` implements `error` so if the caller doesn't know about +multierror, it will work just fine. But if you're aware a multierror might +be returned, you can use type switches to access the list of errors: + +```go +if err := something(); err != nil { + if merr, ok := err.(*multierror.Error); ok { + // Use merr.Errors + } +} +``` + +You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap) +function. This will continue to unwrap into subsequent errors until none exist. + +**Extracting an error** + +The standard library [`errors.As`](https://golang.org/pkg/errors/#As) +function can be used directly with a multierror to extract a specific error: + +```go +// Assume err is a multierror value +err := somefunc() + +// We want to know if "err" has a "RichErrorType" in it and extract it. +var errRich RichErrorType +if errors.As(err, &errRich) { + // It has it, and now errRich is populated. +} +``` + +**Checking for an exact error value** + +Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables) +error in the `os` package. You can check if this error is present by using +the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function. + +```go +// Assume err is a multierror value +err := somefunc() +if errors.Is(err, os.ErrNotExist) { + // err contains os.ErrNotExist +} +``` + +**Returning a multierror only if there are errors** + +If you build a `multierror.Error`, you can use the `ErrorOrNil` function +to return an `error` implementation only if there are errors to return: + +```go +var result *multierror.Error + +// ... accumulate errors here + +// Return the `error` only if errors were added to the multierror, otherwise +// return nil since there are no errors. +return result.ErrorOrNil() +``` diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go new file mode 100644 index 000000000..3e2589bfd --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/append.go @@ -0,0 +1,43 @@ +package multierror + +// Append is a helper function that will append more errors +// onto an Error in order to create a larger multi-error. 
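+// For example, Append(nil, err1, err2) returns an *Error whose Errors
+// slice holds err1 and err2.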
+// +// If err is not a multierror.Error, then it will be turned into +// one. If any of the errs are multierr.Error, they will be flattened +// one level into err. +// Any nil errors within errs will be ignored. If err is nil, a new +// *Error will be returned. +func Append(err error, errs ...error) *Error { + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Go through each error and flatten + for _, e := range errs { + switch e := e.(type) { + case *Error: + if e != nil { + err.Errors = append(err.Errors, e.Errors...) + } + default: + if e != nil { + err.Errors = append(err.Errors, e) + } + } + } + + return err + default: + newErrs := make([]error, 0, len(errs)+1) + if err != nil { + newErrs = append(newErrs, err) + } + newErrs = append(newErrs, errs...) + + return Append(&Error{}, newErrs...) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go new file mode 100644 index 000000000..aab8e9abe --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/flatten.go @@ -0,0 +1,26 @@ +package multierror + +// Flatten flattens the given error, merging any *Errors together into +// a single *Error. +func Flatten(err error) error { + // If it isn't an *Error, just return the error as-is + if _, ok := err.(*Error); !ok { + return err + } + + // Otherwise, make the result and flatten away! + flatErr := new(Error) + flatten(err, flatErr) + return flatErr +} + +func flatten(err error, flatErr *Error) { + switch err := err.(type) { + case *Error: + for _, e := range err.Errors { + flatten(e, flatErr) + } + default: + flatErr.Errors = append(flatErr.Errors, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go new file mode 100644 index 000000000..47f13c49a --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/format.go @@ -0,0 +1,27 @@ +package multierror + +import ( + "fmt" + "strings" +) + +// ErrorFormatFunc is a function callback that is called by Error to +// turn the list of errors into a string. +type ErrorFormatFunc func([]error) string + +// ListFormatFunc is a basic formatter that outputs the number of errors +// that occurred along with a bullet point list of the errors. +func ListFormatFunc(es []error) string { + if len(es) == 1 { + return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0]) + } + + points := make([]string, len(es)) + for i, err := range es { + points[i] = fmt.Sprintf("* %s", err) + } + + return fmt.Sprintf( + "%d errors occurred:\n\t%s\n\n", + len(es), strings.Join(points, "\n\t")) +} diff --git a/vendor/github.com/hashicorp/go-multierror/group.go b/vendor/github.com/hashicorp/go-multierror/group.go new file mode 100644 index 000000000..9c29efb7f --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/group.go @@ -0,0 +1,38 @@ +package multierror + +import "sync" + +// Group is a collection of goroutines which return errors that need to be +// coalesced. +type Group struct { + mutex sync.Mutex + err *Error + wg sync.WaitGroup +} + +// Go calls the given function in a new goroutine. +// +// If the function returns an error it is added to the group multierror which +// is returned by Wait. 
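+//
+// Go is safe to call from multiple goroutines: the internal mutex
+// serializes appends to the shared multierror, and Wait().ErrorOrNil()
+// yields the combined result (or nil if every function succeeded).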
+func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.mutex.Lock() + g.err = Append(g.err, err) + g.mutex.Unlock() + } + }() +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the multierror. +func (g *Group) Wait() *Error { + g.wg.Wait() + g.mutex.Lock() + defer g.mutex.Unlock() + return g.err +} diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go new file mode 100644 index 000000000..f54574326 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -0,0 +1,121 @@ +package multierror + +import ( + "errors" + "fmt" +) + +// Error is an error type to track multiple errors. This is used to +// accumulate errors in cases and return them as a single "error". +type Error struct { + Errors []error + ErrorFormat ErrorFormatFunc +} + +func (e *Error) Error() string { + fn := e.ErrorFormat + if fn == nil { + fn = ListFormatFunc + } + + return fn(e.Errors) +} + +// ErrorOrNil returns an error interface if this Error represents +// a list of errors, or returns nil if the list of errors is empty. This +// function is useful at the end of accumulation to make sure that the value +// returned represents the existence of errors. +func (e *Error) ErrorOrNil() error { + if e == nil { + return nil + } + if len(e.Errors) == 0 { + return nil + } + + return e +} + +func (e *Error) GoString() string { + return fmt.Sprintf("*%#v", *e) +} + +// WrappedErrors returns the list of errors that this Error is wrapping. It is +// an implementation of the errwrap.Wrapper interface so that multierror.Error +// can be used with that library. +// +// This method is not safe to be called concurrently. Unlike accessing the +// Errors field directly, this function also checks if the multierror is nil to +// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + return e.Errors +} + +// Unwrap returns an error from Error (or nil if there are no errors). +// This error returned will further support Unwrap to get the next error, +// etc. The order will match the order of Errors in the multierror.Error +// at the time of calling. +// +// The resulting error supports errors.As/Is/Unwrap so you can continue +// to use the stdlib errors package to introspect further. +// +// This will perform a shallow copy of the errors slice. Any errors appended +// to this error after calling Unwrap will not be available until a new +// Unwrap is called on the multierror.Error. +func (e *Error) Unwrap() error { + // If we have no errors then we do nothing + if e == nil || len(e.Errors) == 0 { + return nil + } + + // If we have exactly one error, we can just return that directly. + if len(e.Errors) == 1 { + return e.Errors[0] + } + + // Shallow copy the slice + errs := make([]error, len(e.Errors)) + copy(errs, e.Errors) + return chain(errs) +} + +// chain implements the interfaces necessary for errors.Is/As/Unwrap to +// work in a deterministic way with multierror. A chain tracks a list of +// errors while accounting for the current represented error. This lets +// Is/As be meaningful. +// +// Unwrap returns the next error. In the cleanest form, Unwrap would return +// the wrapped error here but we can't do that if we want to properly +// get access to all the errors. 
Instead, users are recommended to use +// Is/As to get the correct error type out. +// +// Precondition: []error is non-empty (len > 0) +type chain []error + +// Error implements the error interface +func (e chain) Error() string { + return e[0].Error() +} + +// Unwrap implements errors.Unwrap by returning the next error in the +// chain or nil if there are no more errors. +func (e chain) Unwrap() error { + if len(e) == 1 { + return nil + } + + return e[1:] +} + +// As implements errors.As by attempting to map to the current value. +func (e chain) As(target interface{}) bool { + return errors.As(e[0], target) +} + +// Is implements errors.Is by comparing the current value directly. +func (e chain) Is(target error) bool { + return errors.Is(e[0], target) +} diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go new file mode 100644 index 000000000..5c477abe4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/prefix.go @@ -0,0 +1,37 @@ +package multierror + +import ( + "fmt" + + "github.com/hashicorp/errwrap" +) + +// Prefix is a helper function that will prefix some text +// to the given error. If the error is a multierror.Error, then +// it will be prefixed to each wrapped error. +// +// This is useful to use when appending multiple multierrors +// together in order to give better scoping. +func Prefix(err error, prefix string) error { + if err == nil { + return nil + } + + format := fmt.Sprintf("%s {{err}}", prefix) + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Wrap each of the errors + for i, e := range err.Errors { + err.Errors[i] = errwrap.Wrapf(format, e) + } + + return err + default: + return errwrap.Wrapf(format, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go new file mode 100644 index 000000000..fecb14e81 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/sort.go @@ -0,0 +1,16 @@ +package multierror + +// Len implements sort.Interface function for length +func (err Error) Len() int { + return len(err.Errors) +} + +// Swap implements sort.Interface function for swapping elements +func (err Error) Swap(i, j int) { + err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i] +} + +// Less implements sort.Interface function for determining order +func (err Error) Less(i, j int) bool { + return err.Errors[i].Error() < err.Errors[j].Error() +} diff --git a/vendor/github.com/jlaffaye/ftp/LICENSE b/vendor/github.com/jlaffaye/ftp/LICENSE new file mode 100644 index 000000000..9ab085c51 --- /dev/null +++ b/vendor/github.com/jlaffaye/ftp/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2011-2013, Julien Laffaye + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/vendor/github.com/jlaffaye/ftp/README.md b/vendor/github.com/jlaffaye/ftp/README.md new file mode 100644 index 000000000..8436b0ef7 --- /dev/null +++ b/vendor/github.com/jlaffaye/ftp/README.md @@ -0,0 +1,63 @@ +# goftp # + +[![Units tests](https://github.com/jlaffaye/ftp/actions/workflows/unit_tests.yaml/badge.svg)](https://github.com/jlaffaye/ftp/actions/workflows/unit_tests.yaml) +[![Coverage Status](https://coveralls.io/repos/jlaffaye/ftp/badge.svg?branch=master&service=github)](https://coveralls.io/github/jlaffaye/ftp?branch=master) +[![golangci-lint](https://github.com/jlaffaye/ftp/actions/workflows/golangci-lint.yaml/badge.svg)](https://github.com/jlaffaye/ftp/actions/workflows/golangci-lint.yaml) +[![CodeQL](https://github.com/jlaffaye/ftp/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/jlaffaye/ftp/actions/workflows/codeql-analysis.yml) +[![Go ReportCard](https://goreportcard.com/badge/jlaffaye/ftp)](http://goreportcard.com/report/jlaffaye/ftp) +[![Go Reference](https://pkg.go.dev/badge/github.com/jlaffaye/ftp.svg)](https://pkg.go.dev/github.com/jlaffaye/ftp) + +A FTP client package for Go + +## Install ## + +``` +go get -u github.com/jlaffaye/ftp +``` + +## Documentation ## + +https://pkg.go.dev/github.com/jlaffaye/ftp + +## Example ## + +```go +c, err := ftp.Dial("ftp.example.org:21", ftp.DialWithTimeout(5*time.Second)) +if err != nil { + log.Fatal(err) +} + +err = c.Login("anonymous", "anonymous") +if err != nil { + log.Fatal(err) +} + +// Do something with the FTP conn + +if err := c.Quit(); err != nil { + log.Fatal(err) +} +``` + +## Store a file example ## + +```go +data := bytes.NewBufferString("Hello World") +err = c.Stor("test-file.txt", data) +if err != nil { + panic(err) +} +``` + +## Read a file example ## + +```go +r, err := c.Retr("test-file.txt") +if err != nil { + panic(err) +} +defer r.Close() + +buf, err := ioutil.ReadAll(r) +println(string(buf)) +``` diff --git a/vendor/github.com/jlaffaye/ftp/debug.go b/vendor/github.com/jlaffaye/ftp/debug.go new file mode 100644 index 000000000..5973064ef --- /dev/null +++ b/vendor/github.com/jlaffaye/ftp/debug.go @@ -0,0 +1,37 @@ +package ftp + +import "io" + +type debugWrapper struct { + conn io.ReadWriteCloser + io.Reader + io.Writer +} + +func newDebugWrapper(conn io.ReadWriteCloser, w io.Writer) io.ReadWriteCloser { + return &debugWrapper{ + Reader: io.TeeReader(conn, w), + Writer: io.MultiWriter(w, conn), + conn: conn, + } +} + +func (w *debugWrapper) Close() error { + return w.conn.Close() +} + +type streamDebugWrapper struct { + io.Reader + closer io.ReadCloser +} + +func newStreamDebugWrapper(rd io.ReadCloser, w io.Writer) io.ReadCloser { + return &streamDebugWrapper{ + Reader: io.TeeReader(rd, w), + closer: rd, + } +} + +func (w *streamDebugWrapper) Close() error { + return w.closer.Close() +} diff --git a/vendor/github.com/jlaffaye/ftp/ftp.go b/vendor/github.com/jlaffaye/ftp/ftp.go new file mode 100644 index 000000000..02136ae6a --- /dev/null +++ b/vendor/github.com/jlaffaye/ftp/ftp.go @@ -0,0 +1,1160 @@ +// Package ftp implements a FTP client as described in RFC 959. +// +// A textproto.Error is returned for errors at the protocol level. +package ftp + +import ( + "bufio" + "context" + "crypto/tls" + "errors" + "io" + "net" + "net/textproto" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-multierror" +) + +const ( + // 30 seconds was chosen as it's the + // same duration as http.DefaultTransport's timeout. 
+ DefaultDialTimeout = 30 * time.Second +) + +// EntryType describes the different types of an Entry. +type EntryType int + +// The differents types of an Entry +const ( + EntryTypeFile EntryType = iota + EntryTypeFolder + EntryTypeLink +) + +// TransferType denotes the formats for transferring Entries. +type TransferType string + +// The different transfer types +const ( + TransferTypeBinary = TransferType("I") + TransferTypeASCII = TransferType("A") +) + +// Time format used by the MDTM and MFMT commands +const timeFormat = "20060102150405" + +// ServerConn represents the connection to a remote FTP server. +// A single connection only supports one in-flight data connection. +// It is not safe to be called concurrently. +type ServerConn struct { + options *dialOptions + conn *textproto.Conn // connection wrapper for text protocol + netConn net.Conn // underlying network connection + host string + + // Server capabilities discovered at runtime + features map[string]string + skipEPSV bool + mlstSupported bool + mfmtSupported bool + mdtmSupported bool + mdtmCanWrite bool + usePRET bool +} + +// DialOption represents an option to start a new connection with Dial +type DialOption struct { + setup func(do *dialOptions) +} + +// dialOptions contains all the options set by DialOption.setup +type dialOptions struct { + context context.Context + dialer net.Dialer + tlsConfig *tls.Config + explicitTLS bool + disableEPSV bool + disableUTF8 bool + disableMLSD bool + writingMDTM bool + forceListHidden bool + location *time.Location + debugOutput io.Writer + dialFunc func(network, address string) (net.Conn, error) + shutTimeout time.Duration // time to wait for data connection closing status +} + +// Entry describes a file and is returned by List(). +type Entry struct { + Name string + Target string // target of symbolic link + Type EntryType + Size uint64 + Time time.Time +} + +// Response represents a data-connection +type Response struct { + conn net.Conn + c *ServerConn + closed bool +} + +// Dial connects to the specified address with optional options +func Dial(addr string, options ...DialOption) (*ServerConn, error) { + do := &dialOptions{} + for _, option := range options { + option.setup(do) + } + + if do.location == nil { + do.location = time.UTC + } + + dialFunc := do.dialFunc + + if dialFunc == nil { + ctx := do.context + + if ctx == nil { + ctx = context.Background() + } + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, DefaultDialTimeout) + defer cancel() + } + + if do.tlsConfig != nil && !do.explicitTLS { + dialFunc = func(network, address string) (net.Conn, error) { + tlsDialer := &tls.Dialer{ + NetDialer: &do.dialer, + Config: do.tlsConfig, + } + return tlsDialer.DialContext(ctx, network, addr) + } + } else { + + dialFunc = func(network, address string) (net.Conn, error) { + return do.dialer.DialContext(ctx, network, addr) + } + } + } + + tconn, err := dialFunc("tcp", addr) + if err != nil { + return nil, err + } + + // Use the resolved IP address in case addr contains a domain name + // If we use the domain name, we might not resolve to the same IP. 
+ remoteAddr := tconn.RemoteAddr().(*net.TCPAddr) + + c := &ServerConn{ + options: do, + features: make(map[string]string), + conn: textproto.NewConn(do.wrapConn(tconn)), + netConn: tconn, + host: remoteAddr.IP.String(), + } + + _, _, err = c.conn.ReadResponse(StatusReady) + if err != nil { + _ = c.Quit() + return nil, err + } + + if do.explicitTLS { + if err := c.authTLS(); err != nil { + _ = c.Quit() + return nil, err + } + tconn = tls.Client(tconn, do.tlsConfig) + c.conn = textproto.NewConn(do.wrapConn(tconn)) + } + + return c, nil +} + +// DialWithTimeout returns a DialOption that configures the ServerConn with specified timeout +func DialWithTimeout(timeout time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.dialer.Timeout = timeout + }} +} + +// DialWithShutTimeout returns a DialOption that configures the ServerConn with +// maximum time to wait for the data closing status on control connection +// and nudging the control connection deadline before reading status. +func DialWithShutTimeout(shutTimeout time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.shutTimeout = shutTimeout + }} +} + +// DialWithDialer returns a DialOption that configures the ServerConn with specified net.Dialer +func DialWithDialer(dialer net.Dialer) DialOption { + return DialOption{func(do *dialOptions) { + do.dialer = dialer + }} +} + +// DialWithNetConn returns a DialOption that configures the ServerConn with the underlying net.Conn +// +// Deprecated: Use [DialWithDialFunc] instead +func DialWithNetConn(conn net.Conn) DialOption { + return DialWithDialFunc(func(network, address string) (net.Conn, error) { + return conn, nil + }) +} + +// DialWithDisabledEPSV returns a DialOption that configures the ServerConn with EPSV disabled +// Note that EPSV is only used when advertised in the server features. +func DialWithDisabledEPSV(disabled bool) DialOption { + return DialOption{func(do *dialOptions) { + do.disableEPSV = disabled + }} +} + +// DialWithDisabledUTF8 returns a DialOption that configures the ServerConn with UTF8 option disabled +func DialWithDisabledUTF8(disabled bool) DialOption { + return DialOption{func(do *dialOptions) { + do.disableUTF8 = disabled + }} +} + +// DialWithDisabledMLSD returns a DialOption that configures the ServerConn with MLSD option disabled +// +// This is useful for servers which advertise MLSD (eg some versions +// of Serv-U) but don't support it properly. +func DialWithDisabledMLSD(disabled bool) DialOption { + return DialOption{func(do *dialOptions) { + do.disableMLSD = disabled + }} +} + +// DialWithWritingMDTM returns a DialOption making ServerConn use MDTM to set file time +// +// This option addresses a quirk in the VsFtpd server which doesn't support +// the MFMT command for setting file time like other servers but by default +// uses the MDTM command with non-standard arguments for that. +// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html +func DialWithWritingMDTM(enabled bool) DialOption { + return DialOption{func(do *dialOptions) { + do.writingMDTM = enabled + }} +} + +// DialWithForceListHidden returns a DialOption making ServerConn use LIST -a to include hidden files and folders in directory listings +// +// This is useful for servers that do not do this by default, but it forces the use of the LIST command +// even if the server supports MLST. 
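+//
+// Note that "-a" is not a standard LIST argument, so some servers may
+// reject or ignore it.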
+func DialWithForceListHidden(enabled bool) DialOption { + return DialOption{func(do *dialOptions) { + do.forceListHidden = enabled + }} +} + +// DialWithLocation returns a DialOption that configures the ServerConn with specified time.Location +// The location is used to parse the dates sent by the server which are in server's timezone +func DialWithLocation(location *time.Location) DialOption { + return DialOption{func(do *dialOptions) { + do.location = location + }} +} + +// DialWithContext returns a DialOption that configures the ServerConn with specified context +// The context will be used for the initial connection setup +func DialWithContext(ctx context.Context) DialOption { + return DialOption{func(do *dialOptions) { + do.context = ctx + }} +} + +// DialWithTLS returns a DialOption that configures the ServerConn with specified TLS config +// +// If called together with the DialWithDialFunc option, the DialWithDialFunc function +// will be used when dialing new connections but regardless of the function, +// the connection will be treated as a TLS connection. +func DialWithTLS(tlsConfig *tls.Config) DialOption { + return DialOption{func(do *dialOptions) { + do.tlsConfig = tlsConfig + }} +} + +// DialWithExplicitTLS returns a DialOption that configures the ServerConn to be upgraded to TLS +// See DialWithTLS for general TLS documentation +func DialWithExplicitTLS(tlsConfig *tls.Config) DialOption { + return DialOption{func(do *dialOptions) { + do.explicitTLS = true + do.tlsConfig = tlsConfig + }} +} + +// DialWithDebugOutput returns a DialOption that configures the ServerConn to write to the Writer +// everything it reads from the server +func DialWithDebugOutput(w io.Writer) DialOption { + return DialOption{func(do *dialOptions) { + do.debugOutput = w + }} +} + +// DialWithDialFunc returns a DialOption that configures the ServerConn to use the +// specified function to establish both control and data connections +// +// If used together with the DialWithNetConn option, the DialWithNetConn +// takes precedence for the control connection, while data connections will +// be established using function specified with the DialWithDialFunc option +func DialWithDialFunc(f func(network, address string) (net.Conn, error)) DialOption { + return DialOption{func(do *dialOptions) { + do.dialFunc = f + }} +} + +func (o *dialOptions) wrapConn(netConn net.Conn) io.ReadWriteCloser { + if o.debugOutput == nil { + return netConn + } + + return newDebugWrapper(netConn, o.debugOutput) +} + +func (o *dialOptions) wrapStream(rd io.ReadCloser) io.ReadCloser { + if o.debugOutput == nil { + return rd + } + + return newStreamDebugWrapper(rd, o.debugOutput) +} + +// Connect is an alias to Dial, for backward compatibility +// +// Deprecated: Use [Dial] instead +func Connect(addr string) (*ServerConn, error) { + return Dial(addr) +} + +// DialTimeout initializes the connection to the specified ftp server address. +// +// Deprecated: Use [Dial] with [DialWithTimeout] option instead +func DialTimeout(addr string, timeout time.Duration) (*ServerConn, error) { + return Dial(addr, DialWithTimeout(timeout)) +} + +// Login authenticates the client with specified user and password. +// +// "anonymous"/"anonymous" is a common user/password scheme for FTP servers +// that allows anonymous read-only accounts. 
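+//
+// For example: err := c.Login("anonymous", "anonymous")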
+func (c *ServerConn) Login(user, password string) error { + code, message, err := c.cmd(-1, "USER %s", user) + if err != nil { + return err + } + + switch code { + case StatusLoggedIn: + case StatusUserOK: + _, _, err = c.cmd(StatusLoggedIn, "PASS %s", password) + if err != nil { + return err + } + default: + return errors.New(message) + } + + // Probe features + err = c.feat() + if err != nil { + return err + } + if _, mlstSupported := c.features["MLST"]; mlstSupported && !c.options.disableMLSD { + c.mlstSupported = true + } + _, c.usePRET = c.features["PRET"] + + _, c.mfmtSupported = c.features["MFMT"] + _, c.mdtmSupported = c.features["MDTM"] + c.mdtmCanWrite = c.mdtmSupported && c.options.writingMDTM + + // Switch to binary mode + if err = c.Type(TransferTypeBinary); err != nil { + return err + } + + // Switch to UTF-8 + if !c.options.disableUTF8 { + err = c.setUTF8() + } + + // If using implicit TLS, make data connections also use TLS + if c.options.tlsConfig != nil { + if _, _, err = c.cmd(StatusCommandOK, "PBSZ 0"); err != nil { + return err + } + if _, _, err = c.cmd(StatusCommandOK, "PROT P"); err != nil { + return err + } + } + + return err +} + +// authTLS upgrades the connection to use TLS +func (c *ServerConn) authTLS() error { + _, _, err := c.cmd(StatusAuthOK, "AUTH TLS") + return err +} + +// feat issues a FEAT FTP command to list the additional commands supported by +// the remote FTP server. +// FEAT is described in RFC 2389 +func (c *ServerConn) feat() error { + code, message, err := c.cmd(-1, "FEAT") + if err != nil { + return err + } + + if code != StatusSystem { + // The server does not support the FEAT command. This is not an + // error: we consider that there is no additional feature. + return nil + } + + lines := strings.Split(message, "\n") + for _, line := range lines { + if !strings.HasPrefix(line, " ") { + continue + } + + line = strings.TrimSpace(line) + featureElements := strings.SplitN(line, " ", 2) + + command := featureElements[0] + + var commandDesc string + if len(featureElements) == 2 { + commandDesc = featureElements[1] + } + + c.features[command] = commandDesc + } + + return nil +} + +// setUTF8 issues an "OPTS UTF8 ON" command. +func (c *ServerConn) setUTF8() error { + if _, ok := c.features["UTF8"]; !ok { + return nil + } + + code, message, err := c.cmd(-1, "OPTS UTF8 ON") + if err != nil { + return err + } + + // Workaround for FTP servers, that does not support this option. + if code == StatusBadArguments || code == StatusNotImplementedParameter { + return nil + } + + // The ftpd "filezilla-server" has FEAT support for UTF8, but always returns + // "202 UTF8 mode is always enabled. No need to send this command." when + // trying to use it. That's OK + if code == StatusCommandNotImplemented { + return nil + } + + if code != StatusCommandOK { + return errors.New(message) + } + + return nil +} + +// epsv issues an "EPSV" command to get a port number for a data connection. +func (c *ServerConn) epsv() (port int, err error) { + _, line, err := c.cmd(StatusExtendedPassiveMode, "EPSV") + if err != nil { + return 0, err + } + + start := strings.Index(line, "|||") + end := strings.LastIndex(line, "|") + if start == -1 || end == -1 { + return 0, errors.New("invalid EPSV response format") + } + port, err = strconv.Atoi(line[start+3 : end]) + return port, err +} + +// pasv issues a "PASV" command to get a port number for a data connection. 
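+// It is used as a fallback when EPSV is unavailable, disabled, or has
+// previously failed.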
+func (c *ServerConn) pasv() (host string, port int, err error) { + _, line, err := c.cmd(StatusPassiveMode, "PASV") + if err != nil { + return "", 0, err + } + + // PASV response format : 227 Entering Passive Mode (h1,h2,h3,h4,p1,p2). + start := strings.Index(line, "(") + end := strings.LastIndex(line, ")") + if start == -1 || end == -1 { + return "", 0, errors.New("invalid PASV response format") + } + + // We have to split the response string + pasvData := strings.Split(line[start+1:end], ",") + + if len(pasvData) < 6 { + return "", 0, errors.New("invalid PASV response format") + } + + // Let's compute the port number + portPart1, err := strconv.Atoi(pasvData[4]) + if err != nil { + return "", 0, err + } + + portPart2, err := strconv.Atoi(pasvData[5]) + if err != nil { + return "", 0, err + } + + // Recompose port + port = portPart1*256 + portPart2 + + // Make the IP address to connect to + host = strings.Join(pasvData[0:4], ".") + + if c.host != host { + if cmdIP := net.ParseIP(c.host); cmdIP != nil { + if dataIP := net.ParseIP(host); dataIP != nil { + if isBogusDataIP(cmdIP, dataIP) { + return c.host, port, nil + } + } + } + } + return host, port, nil +} + +func isBogusDataIP(cmdIP, dataIP net.IP) bool { + // Logic stolen from lftp (https://github.com/lavv17/lftp/blob/d67fc14d085849a6b0418bb3e912fea2e94c18d1/src/ftpclass.cc#L769) + return dataIP.IsMulticast() || + cmdIP.IsPrivate() != dataIP.IsPrivate() || + cmdIP.IsLoopback() != dataIP.IsLoopback() +} + +// getDataConnPort returns a host, port for a new data connection +// it uses the best available method to do so +func (c *ServerConn) getDataConnPort() (string, int, error) { + if !c.options.disableEPSV && !c.skipEPSV { + if port, err := c.epsv(); err == nil { + return c.host, port, nil + } + + // if there is an error, skip EPSV for the next attempts + c.skipEPSV = true + } + + return c.pasv() +} + +// openDataConn creates a new FTP data connection. +func (c *ServerConn) openDataConn() (net.Conn, error) { + host, port, err := c.getDataConnPort() + if err != nil { + return nil, err + } + + addr := net.JoinHostPort(host, strconv.Itoa(port)) + if c.options.dialFunc != nil { + return c.options.dialFunc("tcp", addr) + } + + if c.options.tlsConfig != nil { + // We don't use tls.DialWithDialer here (which does Dial, create + // the Client and then do the Handshake) because it seems to + // hang with some FTP servers, namely proftpd and pureftpd. + // + // Instead we do Dial, create the Client and wait for the first + // Read or Write to trigger the Handshake. + // + // This means that if we are uploading a zero sized file, we + // need to make sure we do the Handshake explicitly as Write + // won't have been called. This is done in StorFrom(). + // + // See: https://github.com/jlaffaye/ftp/issues/282 + conn, err := c.options.dialer.Dial("tcp", addr) + if err != nil { + return nil, err + } + tlsConn := tls.Client(conn, c.options.tlsConfig) + return tlsConn, nil + } + + return c.options.dialer.Dial("tcp", addr) +} + +// cmd is a helper function to execute a command and check for the expected FTP +// return code +func (c *ServerConn) cmd(expected int, format string, args ...interface{}) (int, string, error) { + _, err := c.conn.Cmd(format, args...) + if err != nil { + return 0, "", err + } + + return c.conn.ReadResponse(expected) +} + +// cmdDataConnFrom executes a command which require a FTP data connection. +// Issues a REST FTP command to specify the number of bytes to skip for the transfer. 
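+// A zero offset skips the REST command entirely.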
+func (c *ServerConn) cmdDataConnFrom(offset uint64, format string, args ...interface{}) (net.Conn, error) { + // If server requires PRET send the PRET command to warm it up + // See: https://tools.ietf.org/html/draft-dd-pret-00 + if c.usePRET { + _, _, err := c.cmd(-1, "PRET "+format, args...) + if err != nil { + return nil, err + } + } + + conn, err := c.openDataConn() + if err != nil { + return nil, err + } + + if offset != 0 { + _, _, err = c.cmd(StatusRequestFilePending, "REST %d", offset) + if err != nil { + _ = conn.Close() + return nil, err + } + } + + _, err = c.conn.Cmd(format, args...) + if err != nil { + _ = conn.Close() + return nil, err + } + + code, msg, err := c.conn.ReadResponse(-1) + if err != nil { + _ = conn.Close() + return nil, err + } + if code != StatusAlreadyOpen && code != StatusAboutToSend { + _ = conn.Close() + return nil, &textproto.Error{Code: code, Msg: msg} + } + + return conn, nil +} + +// Type switches the transfer mode for the connection. +func (c *ServerConn) Type(transferType TransferType) (err error) { + _, _, err = c.cmd(StatusCommandOK, "TYPE "+string(transferType)) + return err +} + +// NameList issues an NLST FTP command. +func (c *ServerConn) NameList(path string) (entries []string, err error) { + space := " " + if path == "" { + space = "" + } + conn, err := c.cmdDataConnFrom(0, "NLST%s%s", space, path) + if err != nil { + return nil, err + } + + var errs *multierror.Error + + r := &Response{conn: conn, c: c} + + scanner := bufio.NewScanner(c.options.wrapStream(r)) + for scanner.Scan() { + entries = append(entries, scanner.Text()) + } + + if err := scanner.Err(); err != nil { + errs = multierror.Append(errs, err) + } + if err := r.Close(); err != nil { + errs = multierror.Append(errs, err) + } + + return entries, errs.ErrorOrNil() +} + +// List issues a LIST FTP command. +func (c *ServerConn) List(path string) (entries []*Entry, err error) { + var cmd string + var parser parseFunc + + if c.mlstSupported && !c.options.forceListHidden { + cmd = "MLSD" + parser = parseRFC3659ListLine + } else { + cmd = "LIST" + if c.options.forceListHidden { + cmd += " -a" + } + parser = parseListLine + } + + space := " " + if path == "" { + space = "" + } + conn, err := c.cmdDataConnFrom(0, "%s%s%s", cmd, space, path) + if err != nil { + return nil, err + } + + var errs *multierror.Error + + r := &Response{conn: conn, c: c} + + scanner := bufio.NewScanner(c.options.wrapStream(r)) + now := time.Now() + for scanner.Scan() { + entry, errParse := parser(scanner.Text(), now, c.options.location) + if errParse == nil { + entries = append(entries, entry) + } + } + + if err := scanner.Err(); err != nil { + errs = multierror.Append(errs, err) + } + if err := r.Close(); err != nil { + errs = multierror.Append(errs, err) + } + + return entries, errs.ErrorOrNil() +} + +// GetEntry issues a MLST FTP command which retrieves one single Entry using the +// control connection. The returnedEntry will describe the current directory +// when no path is given. 
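+//
+// For example, c.GetEntry("file.txt") retrieves the size and modification
+// time of a single file without opening a data connection.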
+func (c *ServerConn) GetEntry(path string) (entry *Entry, err error) { + if !c.mlstSupported { + return nil, &textproto.Error{Code: StatusNotImplemented, Msg: StatusText(StatusNotImplemented)} + } + space := " " + if path == "" { + space = "" + } + _, msg, err := c.cmd(StatusRequestedFileActionOK, "%s%s%s", "MLST", space, path) + if err != nil { + return nil, err + } + + // The expected reply will look something like: + // + // 250-File details + // Type=file;Size=1024;Modify=20220813133357; path + // 250 End + // + // Multiple lines are allowed though, so it can also be in the form: + // + // 250-File details + // Type=file;Size=1024; path + // Modify=20220813133357; path + // 250 End + lines := strings.Split(msg, "\n") + lc := len(lines) + + // lines must be a multi-line message with a length of 3 or more, and we + // don't care about the first and last line + if lc < 3 { + return nil, errors.New("invalid response") + } + + e := &Entry{} + for _, l := range lines[1 : lc-1] { + // According to RFC 3659, the entry lines must start with a space when passed over the + // control connection. Some servers don't seem to add that space though. Both forms are + // accepted here. + if len(l) > 0 && l[0] == ' ' { + l = l[1:] + } + // Some severs seem to send a blank line at the end which we ignore + if l == "" { + continue + } + if e, err = parseNextRFC3659ListLine(l, c.options.location, e); err != nil { + return nil, err + } + } + return e, nil +} + +// IsTimePreciseInList returns true if client and server support the MLSD +// command so List can return time with 1-second precision for all files. +func (c *ServerConn) IsTimePreciseInList() bool { + return c.mlstSupported +} + +// ChangeDir issues a CWD FTP command, which changes the current directory to +// the specified path. +func (c *ServerConn) ChangeDir(path string) error { + _, _, err := c.cmd(StatusRequestedFileActionOK, "CWD %s", path) + return err +} + +// ChangeDirToParent issues a CDUP FTP command, which changes the current +// directory to the parent directory. This is similar to a call to ChangeDir +// with a path set to "..". +func (c *ServerConn) ChangeDirToParent() error { + _, _, err := c.cmd(StatusRequestedFileActionOK, "CDUP") + return err +} + +// CurrentDir issues a PWD FTP command, which Returns the path of the current +// directory. +func (c *ServerConn) CurrentDir() (string, error) { + _, msg, err := c.cmd(StatusPathCreated, "PWD") + if err != nil { + return "", err + } + + start := strings.Index(msg, "\"") + end := strings.LastIndex(msg, "\"") + + if start == -1 || end == -1 { + return "", errors.New("unsuported PWD response format") + } + + return msg[start+1 : end], nil +} + +// FileSize issues a SIZE FTP command, which Returns the size of the file +func (c *ServerConn) FileSize(path string) (int64, error) { + _, msg, err := c.cmd(StatusFile, "SIZE %s", path) + if err != nil { + return 0, err + } + + return strconv.ParseInt(msg, 10, 64) +} + +// GetTime issues the MDTM FTP command to obtain the file modification time. +// It returns a UTC time. +func (c *ServerConn) GetTime(path string) (time.Time, error) { + var t time.Time + if !c.mdtmSupported { + return t, errors.New("GetTime is not supported") + } + _, msg, err := c.cmd(StatusFile, "MDTM %s", path) + if err != nil { + return t, err + } + return time.ParseInLocation(timeFormat, msg, time.UTC) +} + +// IsGetTimeSupported allows library callers to check in advance that they +// can use GetTime to get file time. 
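+// It reports whether the server advertised MDTM in its FEAT response.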
+func (c *ServerConn) IsGetTimeSupported() bool {
+	return c.mdtmSupported
+}
+
+// SetTime issues the MFMT FTP command to set the file modification time.
+// Also it can use a non-standard form of the MDTM command supported by
+// the VsFtpd server instead of MFMT for the same purpose.
+// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
+func (c *ServerConn) SetTime(path string, t time.Time) (err error) {
+	utime := t.In(time.UTC).Format(timeFormat)
+	switch {
+	case c.mfmtSupported:
+		_, _, err = c.cmd(StatusFile, "MFMT %s %s", utime, path)
+	case c.mdtmCanWrite:
+		_, _, err = c.cmd(StatusFile, "MDTM %s %s", utime, path)
+	default:
+		err = errors.New("SetTime is not supported")
+	}
+	return
+}
+
+// IsSetTimeSupported allows library callers to check in advance that they
+// can use SetTime to set file time.
+func (c *ServerConn) IsSetTimeSupported() bool {
+	return c.mfmtSupported || c.mdtmCanWrite
+}
+
+// Retr issues a RETR FTP command to fetch the specified file from the remote
+// FTP server.
+//
+// The returned ReadCloser must be closed to clean up the FTP data connection.
+func (c *ServerConn) Retr(path string) (*Response, error) {
+	return c.RetrFrom(path, 0)
+}
+
+// RetrFrom issues a RETR FTP command to fetch the specified file from the remote
+// FTP server. The server will skip the first offset bytes of the file.
+//
+// The returned ReadCloser must be closed to clean up the FTP data connection.
+func (c *ServerConn) RetrFrom(path string, offset uint64) (*Response, error) {
+	conn, err := c.cmdDataConnFrom(offset, "RETR %s", path)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Response{conn: conn, c: c}, nil
+}
+
+// Stor issues a STOR FTP command to store a file to the remote FTP server.
+// Stor creates the specified file with the content of the io.Reader.
+//
+// Hint: io.Pipe() can be used if an io.Writer is required.
+func (c *ServerConn) Stor(path string, r io.Reader) error {
+	return c.StorFrom(path, r, 0)
+}
+
+// checkDataShut reads the "closing data connection" status from the
+// control connection. It is called after transferring a piece of data
+// on the data connection during which the control connection was idle.
+// This may result in the idle timeout triggering on the control connection
+// right when we try to read the response.
+// The ShutTimeout dial option helps here: it extends the control
+// connection deadline right before the data-closing status is read.
+func (c *ServerConn) checkDataShut() error {
+	if c.options.shutTimeout != 0 {
+		shutDeadline := time.Now().Add(c.options.shutTimeout)
+		if err := c.netConn.SetDeadline(shutDeadline); err != nil {
+			return err
+		}
+	}
+	_, _, err := c.conn.ReadResponse(StatusClosingDataConnection)
+	return err
+}
+
+// StorFrom issues a STOR FTP command to store a file to the remote FTP server.
+// Stor creates the specified file with the content of the io.Reader; writing
+// on the server will start at the given file offset.
+//
+// Hint: io.Pipe() can be used if an io.Writer is required.
+func (c *ServerConn) StorFrom(path string, r io.Reader, offset uint64) error {
+	conn, err := c.cmdDataConnFrom(offset, "STOR %s", path)
+	if err != nil {
+		return err
+	}
+
+	var errs *multierror.Error
+
+	// If the upload fails we still need to try to read the server
+	// response; otherwise, if the failure is not due to a connection problem,
+	// for example the server denied the upload for quota limits, we miss
+	// the response and we cannot use the connection to send other commands.
+	if n, err := io.Copy(conn, r); err != nil {
+		errs = multierror.Append(errs, err)
+	} else if n == 0 {
+		// If we wrote no bytes and got no error, make sure we call
+		// tls.Handshake on the connection as it won't get called
+		// unless Write() is called. (See comment in openDataConn()).
+		//
+		// ProFTP doesn't like this and returns "Unable to build data
+		// connection: Operation not permitted" when trying to upload
+		// an empty file without this.
+		if do, ok := conn.(interface{ Handshake() error }); ok {
+			if err := do.Handshake(); err != nil {
+				errs = multierror.Append(errs, err)
+			}
+		}
+	}
+
+	if err := conn.Close(); err != nil {
+		errs = multierror.Append(errs, err)
+	}
+
+	if err := c.checkDataShut(); err != nil {
+		errs = multierror.Append(errs, err)
+	}
+
+	return errs.ErrorOrNil()
+}
+
+// Append issues an APPE FTP command to store a file to the remote FTP server.
+// If a file already exists with the given path, then the content of the
+// io.Reader is appended. Otherwise, a new file is created with that content.
+//
+// Hint: io.Pipe() can be used if an io.Writer is required.
+func (c *ServerConn) Append(path string, r io.Reader) error {
+	conn, err := c.cmdDataConnFrom(0, "APPE %s", path)
+	if err != nil {
+		return err
+	}
+
+	var errs *multierror.Error
+
+	if _, err := io.Copy(conn, r); err != nil {
+		errs = multierror.Append(errs, err)
+	}
+
+	if err := conn.Close(); err != nil {
+		errs = multierror.Append(errs, err)
+	}
+
+	if err := c.checkDataShut(); err != nil {
+		errs = multierror.Append(errs, err)
+	}
+
+	return errs.ErrorOrNil()
+}
+
+// Rename renames a file on the remote FTP server.
+func (c *ServerConn) Rename(from, to string) error {
+	_, _, err := c.cmd(StatusRequestFilePending, "RNFR %s", from)
+	if err != nil {
+		return err
+	}
+
+	_, _, err = c.cmd(StatusRequestedFileActionOK, "RNTO %s", to)
+	return err
+}
+
+// Delete issues a DELE FTP command to delete the specified file from the
+// remote FTP server.
+func (c *ServerConn) Delete(path string) error {
+	_, _, err := c.cmd(StatusRequestedFileActionOK, "DELE %s", path)
+	return err
+}
+
+// RemoveDirRecur deletes a non-empty folder recursively using
+// RemoveDir and Delete
+func (c *ServerConn) RemoveDirRecur(path string) error {
+	err := c.ChangeDir(path)
+	if err != nil {
+		return err
+	}
+	currentDir, err := c.CurrentDir()
+	if err != nil {
+		return err
+	}
+
+	entries, err := c.List(currentDir)
+	if err != nil {
+		return err
+	}
+
+	for _, entry := range entries {
+		if entry.Name != ".." && entry.Name != "." {
+			if entry.Type == EntryTypeFolder {
+				err = c.RemoveDirRecur(currentDir + "/" + entry.Name)
+				if err != nil {
+					return err
+				}
+			} else {
+				err = c.Delete(entry.Name)
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+	err = c.ChangeDirToParent()
+	if err != nil {
+		return err
+	}
+	err = c.RemoveDir(currentDir)
+	return err
+}
+
+// MakeDir issues a MKD FTP command to create the specified directory on the
+// remote FTP server.
+func (c *ServerConn) MakeDir(path string) error {
+	_, _, err := c.cmd(StatusPathCreated, "MKD %s", path)
+	return err
+}
+
+// RemoveDir issues a RMD FTP command to remove the specified directory from
+// the remote FTP server.
+func (c *ServerConn) RemoveDir(path string) error {
+	_, _, err := c.cmd(StatusRequestedFileActionOK, "RMD %s", path)
+	return err
+}
+
+// Walk prepares the internal walk function so that the caller can begin traversing the directory
+func (c *ServerConn) Walk(root string) *Walker {
+	w := new(Walker)
+	w.serverConn = c
+
+	if !strings.HasSuffix(root, "/") {
+		root += "/"
+	}
+
+	w.root = root
+	w.descend = true
+
+	return w
+}
+
+// NoOp issues a NOOP FTP command.
+// NOOP has no effect and is usually used to prevent the remote FTP server
+// from closing the otherwise idle connection.
+func (c *ServerConn) NoOp() error {
+	_, _, err := c.cmd(StatusCommandOK, "NOOP")
+	return err
+}
+
+// Logout issues a REIN FTP command to log out the current user.
+func (c *ServerConn) Logout() error {
+	_, _, err := c.cmd(StatusReady, "REIN")
+	return err
+}
+
+// Quit issues a QUIT FTP command to properly close the connection to the
+// remote FTP server.
+func (c *ServerConn) Quit() error {
+	var errs *multierror.Error
+
+	if _, err := c.conn.Cmd("QUIT"); err != nil {
+		errs = multierror.Append(errs, err)
+	}
+
+	if err := c.conn.Close(); err != nil {
+		errs = multierror.Append(errs, err)
+	}
+
+	return errs.ErrorOrNil()
+}
+
+// Read implements the io.Reader interface on an FTP data connection.
+func (r *Response) Read(buf []byte) (int, error) {
+	return r.conn.Read(buf)
+}
+
+// Close implements the io.Closer interface on an FTP data connection.
+// After the first call, Close will do nothing and return nil.
+func (r *Response) Close() error {
+	if r.closed {
+		return nil
+	}
+
+	var errs *multierror.Error
+
+	if err := r.conn.Close(); err != nil {
+		errs = multierror.Append(errs, err)
+	}
+
+	if err := r.c.checkDataShut(); err != nil {
+		errs = multierror.Append(errs, err)
+	}
+
+	r.closed = true
+	return errs.ErrorOrNil()
+}
+
+// SetDeadline sets the deadlines associated with the connection.
+func (r *Response) SetDeadline(t time.Time) error {
+	return r.conn.SetDeadline(t)
+}
+
+// String returns the string representation of EntryType t.
+func (t EntryType) String() string {
+	return [...]string{"file", "folder", "link"}[t]
+}
diff --git a/vendor/github.com/jlaffaye/ftp/parse.go b/vendor/github.com/jlaffaye/ftp/parse.go
new file mode 100644
index 000000000..a4af062e1
--- /dev/null
+++ b/vendor/github.com/jlaffaye/ftp/parse.go
@@ -0,0 +1,277 @@
+package ftp
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var errUnsupportedListLine = errors.New("unsupported LIST line")
+var errUnsupportedListDate = errors.New("unsupported LIST date")
+var errUnknownListEntryType = errors.New("unknown entry type")
+
+type parseFunc func(string, time.Time, *time.Location) (*Entry, error)
+
+var listLineParsers = []parseFunc{
+	parseRFC3659ListLine,
+	parseLsListLine,
+	parseDirListLine,
+	parseHostedFTPLine,
+}
+
+var dirTimeFormats = []string{
+	"01-02-06  03:04PM",
+	"2006-01-02  15:04",
+	"01-02-2006  03:04PM",
+	"01-02-2006  15:04",
+}
+
+// parseRFC3659ListLine parses the style of directory line defined in RFC 3659.
+func parseRFC3659ListLine(line string, _ time.Time, loc *time.Location) (*Entry, error) {
+	return parseNextRFC3659ListLine(line, loc, &Entry{})
+}
+
+func parseNextRFC3659ListLine(line string, loc *time.Location, e *Entry) (*Entry, error) {
+	iSemicolon := strings.Index(line, ";")
+	iWhitespace := strings.Index(line, " ")
+
+	if iSemicolon < 0 || iSemicolon > iWhitespace {
+		return nil, errUnsupportedListLine
+	}
+
+	name := line[iWhitespace+1:]
+	if e.Name == "" {
+		e.Name = name
+	} else if e.Name != name {
+		// All lines must have the same name
+		return nil, errUnsupportedListLine
+	}
+
+	for _, field := range strings.Split(line[:iWhitespace-1], ";") {
+		i := strings.Index(field, "=")
+		if i < 1 {
+			return nil, errUnsupportedListLine
+		}
+
+		key := strings.ToLower(field[:i])
+		value := field[i+1:]
+
+		switch key {
+		case "modify":
+			var err error
+			e.Time, err = time.ParseInLocation("20060102150405", value, loc)
+			if err != nil {
+				return nil, err
+			}
+		case "type":
+			switch value {
+			case "dir", "cdir", "pdir":
+				e.Type = EntryTypeFolder
+			case "file":
+				e.Type = EntryTypeFile
+			}
+		case "size":
+			if err := e.setSize(value); err != nil {
+				return nil, err
+			}
+		}
+	}
+	return e, nil
+}
+
+// parseLsListLine parses a directory line in a format based on the output of
+// the UNIX ls command.
+func parseLsListLine(line string, now time.Time, loc *time.Location) (*Entry, error) {
+
+	// Does the first field have a length of exactly 10 bytes,
+	// or 10 bytes plus an additional '+' character indicating ACLs?
+	// If not, return.
+	if i := strings.IndexByte(line, ' '); !(i == 10 || (i == 11 && line[10] == '+')) {
+		return nil, errUnsupportedListLine
+	}
+
+	scanner := newScanner(line)
+	fields := scanner.NextFields(6)
+
+	if len(fields) < 6 {
+		return nil, errUnsupportedListLine
+	}
+
+	if fields[1] == "folder" && fields[2] == "0" {
+		e := &Entry{
+			Type: EntryTypeFolder,
+			Name: scanner.Remaining(),
+		}
+		if err := e.setTime(fields[3:6], now, loc); err != nil {
+			return nil, err
+		}
+
+		return e, nil
+	}
+
+	if fields[1] == "0" {
+		fields = append(fields, scanner.Next())
+		e := &Entry{
+			Type: EntryTypeFile,
+			Name: scanner.Remaining(),
+		}
+
+		if err := e.setSize(fields[2]); err != nil {
+			return nil, errUnsupportedListLine
+		}
+		if err := e.setTime(fields[4:7], now, loc); err != nil {
+			return nil, err
+		}
+
+		return e, nil
+	}
+
+	// Read two more fields
+	fields = append(fields, scanner.NextFields(2)...)
+	if len(fields) < 8 {
+		return nil, errUnsupportedListLine
+	}
+
+	e := &Entry{
+		Name: scanner.Remaining(),
+	}
+	switch fields[0][0] {
+	case '-':
+		e.Type = EntryTypeFile
+		if err := e.setSize(fields[4]); err != nil {
+			return nil, err
+		}
+	case 'd':
+		e.Type = EntryTypeFolder
+	case 'l':
+		e.Type = EntryTypeLink
+
+		// Split link name and target
+		if i := strings.Index(e.Name, " -> "); i > 0 {
+			e.Target = e.Name[i+4:]
+			e.Name = e.Name[:i]
+		}
+	default:
+		return nil, errUnknownListEntryType
+	}
+
+	if err := e.setTime(fields[5:8], now, loc); err != nil {
+		return nil, err
+	}
+
+	return e, nil
+}
+
+// parseDirListLine parses a directory line in a format based on the output of
+// the MS-DOS DIR command.
+func parseDirListLine(line string, now time.Time, loc *time.Location) (*Entry, error) {
+	e := &Entry{}
+	var err error
+
+	// Try various time formats that DIR might use, and stop when one works.
+	for _, format := range dirTimeFormats {
+		if len(line) > len(format) {
+			e.Time, err = time.ParseInLocation(format, line[:len(format)], loc)
+			if err == nil {
+				line = line[len(format):]
+				break
+			}
+		}
+	}
+	if err != nil {
+		// None of the time formats worked.
+		return nil, errUnsupportedListLine
+	}
+
+	line = strings.TrimLeft(line, " ")
+	if strings.HasPrefix(line, "<DIR>") {
+		e.Type = EntryTypeFolder
+		line = strings.TrimPrefix(line, "<DIR>")
+	} else {
+		space := strings.Index(line, " ")
+		if space == -1 {
+			return nil, errUnsupportedListLine
+		}
+		e.Size, err = strconv.ParseUint(line[:space], 10, 64)
+		if err != nil {
+			return nil, errUnsupportedListLine
+		}
+		e.Type = EntryTypeFile
+		line = line[space:]
+	}
+
+	e.Name = strings.TrimLeft(line, " ")
+	return e, nil
+}
+
+// parseHostedFTPLine parses a directory line in the non-standard format used
+// by hostedftp.com
+// -r-------- 0 user group 65222236 Feb 24 00:39 UABlacklistingWeek8.csv
+// (The link count is inexplicably 0)
+func parseHostedFTPLine(line string, now time.Time, loc *time.Location) (*Entry, error) {
+	// Does the first field have a length of 10 bytes?
+	if strings.IndexByte(line, ' ') != 10 {
+		return nil, errUnsupportedListLine
+	}
+
+	scanner := newScanner(line)
+	fields := scanner.NextFields(2)
+
+	if len(fields) < 2 || fields[1] != "0" {
+		return nil, errUnsupportedListLine
+	}
+
+	// Set link count to 1 and attempt to parse as Unix.
+	return parseLsListLine(fields[0]+" 1 "+scanner.Remaining(), now, loc)
+}
+
+// parseListLine parses the various non-standard formats returned by the LIST
+// FTP command.
+func parseListLine(line string, now time.Time, loc *time.Location) (*Entry, error) {
+	for _, f := range listLineParsers {
+		e, err := f(line, now, loc)
+		if err != errUnsupportedListLine {
+			return e, err
+		}
+	}
+	return nil, errUnsupportedListLine
+}
+
+func (e *Entry) setSize(str string) (err error) {
+	e.Size, err = strconv.ParseUint(str, 0, 64)
+	return
+}
+
+func (e *Entry) setTime(fields []string, now time.Time, loc *time.Location) (err error) {
+	if strings.Contains(fields[2], ":") { // contains time
+		thisYear, _, _ := now.Date()
+		timeStr := fmt.Sprintf("%s %s %d %s", fields[1], fields[0], thisYear, fields[2])
+		e.Time, err = time.ParseInLocation("_2 Jan 2006 15:04", timeStr, loc)
+
+		/*
+			On unix, `info ls` shows:
+
+			10.1.6 Formatting file timestamps
+			---------------------------------
+
+			A timestamp is considered to be “recent” if it is less than six
+			months old, and is not dated in the future. If a timestamp dated today
+			is not listed in recent form, the timestamp is in the future, which
+			means you probably have clock skew problems which may break programs
+			like ‘make’ that rely on file timestamps.
+		*/
+		if !e.Time.Before(now.AddDate(0, 6, 0)) {
+			e.Time = e.Time.AddDate(-1, 0, 0)
+		}
+
+	} else { // only the date
+		if len(fields[2]) != 4 {
+			return errUnsupportedListDate
+		}
+		timeStr := fmt.Sprintf("%s %s %s 00:00", fields[1], fields[0], fields[2])
+		e.Time, err = time.ParseInLocation("_2 Jan 2006 15:04", timeStr, loc)
+	}
+	return
+}
diff --git a/vendor/github.com/jlaffaye/ftp/scanner.go b/vendor/github.com/jlaffaye/ftp/scanner.go
new file mode 100644
index 000000000..0dcc8ae64
--- /dev/null
+++ b/vendor/github.com/jlaffaye/ftp/scanner.go
@@ -0,0 +1,58 @@
+package ftp
+
+// A scanner for fields delimited by one or more whitespace characters
+type scanner struct {
+	bytes    []byte
+	position int
+}
+
+// newScanner creates a new scanner
+func newScanner(str string) *scanner {
+	return &scanner{
+		bytes: []byte(str),
+	}
+}
+
+// NextFields returns the next `count` fields
+func (s *scanner) NextFields(count int) []string {
+	fields := make([]string, 0, count)
+	for i := 0; i < count; i++ {
+		if field := s.Next(); field != "" {
+			fields = append(fields, field)
+		} else {
+			break
+		}
+	}
+	return fields
+}
+
+// Next returns the next field
+func (s *scanner) Next() string {
+	sLen := len(s.bytes)
+
+	// skip leading whitespace
+	for s.position < sLen {
+		if s.bytes[s.position] != ' ' {
+			break
+		}
+		s.position++
+	}
+
+	start := s.position
+
+	// skip non-whitespace
+	for s.position < sLen {
+		if s.bytes[s.position] == ' ' {
+			s.position++
+			return string(s.bytes[start : s.position-1])
+		}
+		s.position++
+	}
+
+	return string(s.bytes[start:s.position])
+}
+
+// Remaining returns the remaining string
+func (s *scanner) Remaining() string {
+	return string(s.bytes[s.position:len(s.bytes)])
+}
diff --git a/vendor/github.com/jlaffaye/ftp/status.go b/vendor/github.com/jlaffaye/ftp/status.go
new file mode 100644
index 000000000..8d357b03b
--- /dev/null
+++ b/vendor/github.com/jlaffaye/ftp/status.go
@@ -0,0 +1,119 @@
+package ftp
+
+import "fmt"
+
+// FTP status codes, defined in RFC 959
+const (
+	StatusInitiating    = 100
+	StatusRestartMarker = 110
+	StatusReadyMinute   = 120
+	StatusAlreadyOpen   = 125
+	StatusAboutToSend   = 150
+
+	StatusCommandOK             = 200
+	StatusCommandNotImplemented = 202
+	StatusSystem                = 211
+	StatusDirectory             = 212
+	StatusFile                  = 213
+	StatusHelp                  = 214
+	StatusName                  = 215
+	StatusReady                 = 220
+	StatusClosing               = 221
+	StatusDataConnectionOpen    = 225
+	StatusClosingDataConnection = 226
+	StatusPassiveMode           = 227
+	StatusLongPassiveMode       = 228
+	StatusExtendedPassiveMode   = 229
+	StatusLoggedIn              = 230
+	StatusLoggedOut             = 231
+	StatusLogoutAck             = 232
+	StatusAuthOK                = 234
+	StatusRequestedFileActionOK = 250
+	StatusPathCreated           = 257
+
+	StatusUserOK             = 331
+	StatusLoginNeedAccount   = 332
+	StatusRequestFilePending = 350
+
+	StatusNotAvailable             = 421
+	StatusCanNotOpenDataConnection = 425
+	StatusTransfertAborted         = 426
+	StatusInvalidCredentials       = 430
+	StatusHostUnavailable          = 434
+	StatusFileActionIgnored        = 450
+	StatusActionAborted            = 451
+	Status452                      = 452
+
+	StatusBadCommand              = 500
+	StatusBadArguments            = 501
+	StatusNotImplemented          = 502
+	StatusBadSequence             = 503
+	StatusNotImplementedParameter = 504
+	StatusNotLoggedIn             = 530
+	StatusStorNeedAccount         = 532
+	StatusFileUnavailable         = 550
+	StatusPageTypeUnknown         = 551
+	StatusExceededStorage         = 552
+	StatusBadFileName             = 553
+)
+
+var statusText = map[int]string{
+	// 200
+	StatusCommandOK:             "Command okay.",
+	StatusCommandNotImplemented: "Command not implemented, superfluous at this site.",
+	StatusSystem:                "System status, or system help reply.",
+	StatusDirectory:             "Directory status.",
+	StatusFile:                  "File status.",
+	StatusHelp:                  "Help message.",
+	StatusName:                  "NAME system type.",
+	StatusReady:                 "Service ready for new user.",
+	StatusClosing:               "Service closing control connection.",
+	StatusDataConnectionOpen:    "Data connection open; no transfer in progress.",
+	StatusClosingDataConnection: "Closing data connection. Requested file action successful.",
+	StatusPassiveMode:           "Entering Passive Mode.",
+	StatusLongPassiveMode:       "Entering Long Passive Mode.",
+	StatusExtendedPassiveMode:   "Entering Extended Passive Mode.",
+	StatusLoggedIn:              "User logged in, proceed.",
+	StatusLoggedOut:             "User logged out; service terminated.",
+	StatusLogoutAck:             "Logout command noted, will complete when transfer done.",
+	StatusAuthOK:                "AUTH command OK",
+	StatusRequestedFileActionOK: "Requested file action okay, completed.",
+	StatusPathCreated:           "Path created.",
+
+	// 300
+	StatusUserOK:             "User name okay, need password.",
+	StatusLoginNeedAccount:   "Need account for login.",
+	StatusRequestFilePending: "Requested file action pending further information.",
+
+	// 400
+	StatusNotAvailable:             "Service not available, closing control connection.",
+	StatusCanNotOpenDataConnection: "Can't open data connection.",
+	StatusTransfertAborted:         "Connection closed; transfer aborted.",
+	StatusInvalidCredentials:       "Invalid username or password.",
+	StatusHostUnavailable:          "Requested host unavailable.",
+	StatusFileActionIgnored:        "Requested file action not taken.",
+	StatusActionAborted:            "Requested action aborted. Local error in processing.",
+	Status452:                      "Insufficient storage space in system.",
+
+	// 500
+	StatusBadCommand:              "Command unrecognized.",
+	StatusBadArguments:            "Syntax error in parameters or arguments.",
+	StatusNotImplemented:          "Command not implemented.",
+	StatusBadSequence:             "Bad sequence of commands.",
+	StatusNotImplementedParameter: "Command not implemented for that parameter.",
+	StatusNotLoggedIn:             "Not logged in.",
+	StatusStorNeedAccount:         "Need account for storing files.",
+	StatusFileUnavailable:         "File unavailable.",
+	StatusPageTypeUnknown:         "Page type unknown.",
+	StatusExceededStorage:         "Exceeded storage allocation.",
+	StatusBadFileName:             "File name not allowed.",
+}
+
+// StatusText returns a text for the FTP status code. It returns a generic
+// "Unknown status code" message if the code is unknown.
+func StatusText(code int) string {
+	str, ok := statusText[code]
+	if !ok {
+		str = fmt.Sprintf("Unknown status code: %d", code)
+	}
+	return str
+}
diff --git a/vendor/github.com/jlaffaye/ftp/walker.go b/vendor/github.com/jlaffaye/ftp/walker.go
new file mode 100644
index 000000000..81735f16c
--- /dev/null
+++ b/vendor/github.com/jlaffaye/ftp/walker.go
@@ -0,0 +1,98 @@
+package ftp
+
+import (
+	"path"
+)
+
+// Walker traverses the directory tree of a remote FTP server
+type Walker struct {
+	serverConn *ServerConn
+	root       string
+	cur        *item
+	stack      []*item
+	descend    bool
+}
+
+type item struct {
+	path  string
+	entry *Entry
+	err   error
+}
+
+// Next advances the Walker to the next file or directory,
+// which will then be available through the Path, Stat, and Err methods.
+// It returns false when the walk stops at the end of the tree.
+func (w *Walker) Next() bool {
+	// check if we need to init cur; maybe this should be inside Walk
+	if w.cur == nil {
+		w.cur = &item{
+			path: w.root,
+			entry: &Entry{
+				Type: EntryTypeFolder,
+			},
+		}
+	}
+
+	if w.descend && w.cur.entry.Type == EntryTypeFolder {
+		entries, err := w.serverConn.List(w.cur.path)
+
+		// an error occurred, drop out and stop walking
+		if err != nil {
+			w.cur.err = err
+			return false
+		}
+
+		for _, entry := range entries {
+			if entry.Name == "." || entry.Name == ".." {
+				continue
+			}
+
+			item := &item{
+				path:  path.Join(w.cur.path, entry.Name),
+				entry: entry,
+			}
+
+			w.stack = append(w.stack, item)
+		}
+	}
+
+	if len(w.stack) == 0 {
+		return false
+	}
+
+	// update cur
+	i := len(w.stack) - 1
+	w.cur = w.stack[i]
+	w.stack = w.stack[:i]
+
+	// reset SkipDir
+	w.descend = true
+
+	return true
+}
+
+// SkipDir tells the Next function to skip the currently processed directory
+func (w *Walker) SkipDir() {
+	w.descend = false
+}
+
+// Err returns the error, if any, for the most recent attempt by Next to
+// visit a file or a directory. If a directory has an error, the walker
+// will not descend into that directory
+func (w *Walker) Err() error {
+	return w.cur.err
+}
+
+// Stat returns info for the most recent file or directory
+// visited by a call to Next.
+func (w *Walker) Stat() *Entry {
+	return w.cur.entry
+}
+
+// Path returns the path to the most recent file or directory
+// visited by a call to Next. It contains the argument to Walk
+// as a prefix; that is, if Walk is called with "dir", which is
+// a directory containing the file "a", Path will return "dir/a".
+func (w *Walker) Path() string {
+	return w.cur.path
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore
new file mode 100644
index 000000000..5091fb073
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/.gitignore
@@ -0,0 +1,4 @@
+/jpgo
+jmespath-fuzz.zip
+cpu.out
+go-jmespath.test
diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml
new file mode 100644
index 000000000..c56f37c0c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/.travis.yml
@@ -0,0 +1,28 @@
+language: go
+
+sudo: false
+
+go:
+  - 1.5.x
+  - 1.6.x
+  - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
+  - 1.11.x
+  - 1.12.x
+  - 1.13.x
+  - 1.14.x
+  - 1.15.x
+  - tip
+
+allow_failures:
+  - go: tip
+
+script: make build
+
+matrix:
+  include:
+    - language: go
+      go: 1.15.x
+      script: make test
diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE
new file mode 100644
index 000000000..b03310a91
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
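A brief aside, not part of the vendored sources: the Walker type defined in walker.go above is this library's recursive-traversal primitive. The sketch below shows one way to drive it. It assumes the package's Dial and Login entry points, which are vendored elsewhere in this patch; the host, the credentials, and the walkAndPrint helper are illustrative names, not part of the library.

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/jlaffaye/ftp"
)

// walkAndPrint drives the vendored Walker: Next advances depth-first,
// Stat and Path expose the current entry, and SkipDir prunes a subtree.
func walkAndPrint(conn *ftp.ServerConn, root string) error {
	w := conn.Walk(root)
	for w.Next() {
		entry := w.Stat()
		// Prune dot-directories instead of descending into them.
		if entry.Type == ftp.EntryTypeFolder && strings.HasPrefix(entry.Name, ".") {
			w.SkipDir()
			continue
		}
		fmt.Printf("%-6s %s\n", entry.Type, w.Path())
	}
	// Err reports the error, if any, from the most recent call to Next.
	return w.Err()
}

func main() {
	conn, err := ftp.Dial("ftp.example.com:21") // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Quit()

	if err := conn.Login("anonymous", "anonymous"); err != nil {
		log.Fatal(err)
	}
	if err := walkAndPrint(conn, "/pub"); err != nil {
		log.Fatal(err)
	}
}
```

Note that SkipDir has to be called while the directory entry is still current: Next resets the internal descend flag on every advance, so the pruning decision applies only to the entry most recently returned.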
diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile
new file mode 100644
index 000000000..fb38ec276
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/Makefile
@@ -0,0 +1,51 @@
+
+CMD = jpgo
+
+SRC_PKGS=./ ./cmd/... ./fuzz/...
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  test      to run all the tests"
+	@echo "  build     to build the library and jp executable"
+	@echo "  generate  to run codegen"
+
+
+generate:
+	go generate ${SRC_PKGS}
+
+build:
+	rm -f $(CMD)
+	go build ${SRC_PKGS}
+	rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
+	mv cmd/$(CMD)/$(CMD) .
+
+test: test-internal-testify
+	echo "making tests ${SRC_PKGS}"
+	go test -v ${SRC_PKGS}
+
+check:
+	go vet ${SRC_PKGS}
+	@echo "golint ${SRC_PKGS}"
+	@lint=`golint ${SRC_PKGS}`; \
+	lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
+	echo "$$lint"; \
+	if [ "$$lint" != "" ]; then exit 1; fi
+
+htmlc:
+	go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
+
+buildfuzz:
+	go-fuzz-build github.com/jmespath/go-jmespath/fuzz
+
+fuzz: buildfuzz
+	go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
+
+bench:
+	go test -bench . -cpuprofile cpu.out
+
+pprof-cpu:
+	go tool pprof ./go-jmespath.test ./cpu.out
+
+test-internal-testify:
+	cd internal/testify && go test ./...
+
diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md
new file mode 100644
index 000000000..110ad7999
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/README.md
@@ -0,0 +1,87 @@
+# go-jmespath - A JMESPath implementation in Go
+
+[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)
+
+
+
+go-jmespath is a Go implementation of JMESPath,
+which is a query language for JSON. It will take a JSON
+document and transform it into another JSON document
+through a JMESPath expression.
+
+Using go-jmespath is really easy. There's a single function
+you use, `jmespath.Search`:
+
+
+```go
+> import "github.com/jmespath/go-jmespath"
+>
+> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo.bar.baz[2]", data)
+result = 2
+```
+
+In the example we gave the ``Search`` function input data of
+`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}` as well as the JMESPath
+expression `foo.bar.baz[2]`, and the `Search` function evaluated
+the expression against the input data to produce the result ``2``.
+
+The JMESPath language can do a lot more than select an element
+from a list. Here are a few more examples:
+
+```go
+> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo.bar", data)
+result = { "baz": [ 0, 1, 2, 3, 4 ] }
+
+
+> var jsondata = []byte(`{"foo": [{"first": "a", "last": "b"},
+                          {"first": "c", "last": "d"}]}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo[*].first", data)
+result = [ 'a', 'c' ]
+
+
+> var jsondata = []byte(`{"foo": [{"age": 20}, {"age": 25},
+                          {"age": 30}, {"age": 35},
+                          {"age": 40}]}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo[?age > `30`]", data)
+result = [ { age: 35 }, { age: 40 } ]
+```
+
+You can also pre-compile your query. This is useful if
+you are going to run multiple searches with it:
+
+```go
+	> var jsondata = []byte(`{"foo": "bar"}`)
+	> var data interface{}
+	> err := json.Unmarshal(jsondata, &data)
+	> precompiled, err := jmespath.Compile("foo")
+	> if err != nil {
+	>     // ... handle the error
+	> }
+	> result, err := precompiled.Search(data)
+	result = "bar"
+```
+
+## More Resources
+
+The examples above show only a small part of what
+a JMESPath expression can do. If you want to take a
+tour of the language, the *best* place to go is the
+[JMESPath Tutorial](http://jmespath.org/tutorial.html).
+
+One of the best things about JMESPath is that it is
+implemented in many different programming languages including
+Python, Ruby, PHP, Lua, etc. To see a complete list of libraries,
+check out the [JMESPath libraries page](http://jmespath.org/libraries.html).
+
+And finally, the full JMESPath specification can be found
+on the [JMESPath site](http://jmespath.org/specification.html).
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
new file mode 100644
index 000000000..010efe9bf
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -0,0 +1,49 @@
+package jmespath
+
+import "strconv"
+
+// JMESPath is the representation of a compiled JMES path query. A JMESPath is
+// safe for concurrent use by multiple goroutines.
+type JMESPath struct {
+	ast  ASTNode
+	intr *treeInterpreter
+}
+
+// Compile parses a JMESPath expression and returns, if successful, a JMESPath
+// object that can be used to match against data.
+func Compile(expression string) (*JMESPath, error) {
+	parser := NewParser()
+	ast, err := parser.Parse(expression)
+	if err != nil {
+		return nil, err
+	}
+	jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
+	return jmespath, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled
+// JMESPaths.
+func MustCompile(expression string) *JMESPath {
+	jmespath, err := Compile(expression)
+	if err != nil {
+		panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
+	}
+	return jmespath
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
+	return jp.intr.Execute(jp.ast, data)
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 000000000..1cd2d239c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 000000000..9b7cd89b4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. 
+		return true
+	}
+	ith, ok := first.(float64)
+	if !ok {
+		a.hasError = true
+		return true
+	}
+	second, err := a.intr.Execute(a.node, a.items[j])
+	if err != nil {
+		a.hasError = true
+		// Return a dummy value.
+		return true
+	}
+	jth, ok := second.(float64)
+	if !ok {
+		a.hasError = true
+		return true
+	}
+	return ith < jth
+}
+
+type functionCaller struct {
+	functionTable map[string]functionEntry
+}
+
+func newFunctionCaller() *functionCaller {
+	caller := &functionCaller{}
+	caller.functionTable = map[string]functionEntry{
+		"length": {
+			name: "length",
+			arguments: []argSpec{
+				{types: []jpType{jpString, jpArray, jpObject}},
+			},
+			handler: jpfLength,
+		},
+		"starts_with": {
+			name: "starts_with",
+			arguments: []argSpec{
+				{types: []jpType{jpString}},
+				{types: []jpType{jpString}},
+			},
+			handler: jpfStartsWith,
+		},
+		"abs": {
+			name: "abs",
+			arguments: []argSpec{
+				{types: []jpType{jpNumber}},
+			},
+			handler: jpfAbs,
+		},
+		"avg": {
+			name: "avg",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber}},
+			},
+			handler: jpfAvg,
+		},
+		"ceil": {
+			name: "ceil",
+			arguments: []argSpec{
+				{types: []jpType{jpNumber}},
+			},
+			handler: jpfCeil,
+		},
+		"contains": {
+			name: "contains",
+			arguments: []argSpec{
+				{types: []jpType{jpArray, jpString}},
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfContains,
+		},
+		"ends_with": {
+			name: "ends_with",
+			arguments: []argSpec{
+				{types: []jpType{jpString}},
+				{types: []jpType{jpString}},
+			},
+			handler: jpfEndsWith,
+		},
+		"floor": {
+			name: "floor",
+			arguments: []argSpec{
+				{types: []jpType{jpNumber}},
+			},
+			handler: jpfFloor,
+		},
+		"map": {
+			name: "map",
+			arguments: []argSpec{
+				{types: []jpType{jpExpref}},
+				{types: []jpType{jpArray}},
+			},
+			handler:   jpfMap,
+			hasExpRef: true,
+		},
+		"max": {
+			name: "max",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber, jpArrayString}},
+			},
+			handler: jpfMax,
+		},
+		"merge": {
+			name: "merge",
+			arguments: []argSpec{
+				{types: []jpType{jpObject}, variadic: true},
+			},
+			handler: jpfMerge,
+		},
+		"max_by": {
+			name: "max_by",
+			arguments: []argSpec{
+				{types: []jpType{jpArray}},
+				{types: []jpType{jpExpref}},
+			},
+			handler:   jpfMaxBy,
+			hasExpRef: true,
+		},
+		"sum": {
+			name: "sum",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber}},
+			},
+			handler: jpfSum,
+		},
+		"min": {
+			name: "min",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber, jpArrayString}},
+			},
+			handler: jpfMin,
+		},
+		"min_by": {
+			name: "min_by",
+			arguments: []argSpec{
+				{types: []jpType{jpArray}},
+				{types: []jpType{jpExpref}},
+			},
+			handler:   jpfMinBy,
+			hasExpRef: true,
+		},
+		"type": {
+			name: "type",
+			arguments: []argSpec{
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfType,
+		},
+		"keys": {
+			name: "keys",
+			arguments: []argSpec{
+				{types: []jpType{jpObject}},
+			},
+			handler: jpfKeys,
+		},
+		"values": {
+			name: "values",
+			arguments: []argSpec{
+				{types: []jpType{jpObject}},
+			},
+			handler: jpfValues,
+		},
+		"sort": {
+			name: "sort",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayString, jpArrayNumber}},
+			},
+			handler: jpfSort,
+		},
+		"sort_by": {
+			name: "sort_by",
+			arguments: []argSpec{
+				{types: []jpType{jpArray}},
+				{types: []jpType{jpExpref}},
+			},
+			handler:   jpfSortBy,
+			hasExpRef: true,
+		},
+		"join": {
+			name: "join",
+			arguments: []argSpec{
+				{types: []jpType{jpString}},
+				{types: []jpType{jpArrayString}},
+			},
+			handler: jpfJoin,
+		},
+		"reverse": {
+			name: "reverse",
+			arguments: []argSpec{
+				{types: []jpType{jpArray, jpString}},
+			},
+			handler: jpfReverse,
+		},
+		"to_array": {
+			name: "to_array",
+			arguments: []argSpec{
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfToArray,
+		},
+		"to_string": {
+			name: "to_string",
+			arguments: []argSpec{
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfToString,
+		},
+		"to_number": {
+			name: "to_number",
+			arguments: []argSpec{
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfToNumber,
+		},
+		"not_null": {
+			name: "not_null",
+			arguments: []argSpec{
+				{types: []jpType{jpAny}, variadic: true},
+			},
+			handler: jpfNotNull,
+		},
+	}
+	return caller
+}
+
+func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) {
+	if len(e.arguments) == 0 {
+		return arguments, nil
+	}
+	if !e.arguments[len(e.arguments)-1].variadic {
+		if len(e.arguments) != len(arguments) {
+			return nil, errors.New("incorrect number of args")
+		}
+		for i, spec := range e.arguments {
+			userArg := arguments[i]
+			err := spec.typeCheck(userArg)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return arguments, nil
+	}
+	if len(arguments) < len(e.arguments) {
+		return nil, errors.New("Invalid arity.")
+	}
+	return arguments, nil
+}
+
+func (a *argSpec) typeCheck(arg interface{}) error {
+	for _, t := range a.types {
+		switch t {
+		case jpNumber:
+			if _, ok := arg.(float64); ok {
+				return nil
+			}
+		case jpString:
+			if _, ok := arg.(string); ok {
+				return nil
+			}
+		case jpArray:
+			if isSliceType(arg) {
+				return nil
+			}
+		case jpObject:
+			if _, ok := arg.(map[string]interface{}); ok {
+				return nil
+			}
+		case jpArrayNumber:
+			if _, ok := toArrayNum(arg); ok {
+				return nil
+			}
+		case jpArrayString:
+			if _, ok := toArrayStr(arg); ok {
+				return nil
+			}
+		case jpAny:
+			return nil
+		case jpExpref:
+			if _, ok := arg.(expRef); ok {
+				return nil
+			}
+		}
+	}
+	return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types)
+}
+
+func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) {
+	entry, ok := f.functionTable[name]
+	if !ok {
+		return nil, errors.New("unknown function: " + name)
+	}
+	resolvedArgs, err := entry.resolveArgs(arguments)
+	if err != nil {
+		return nil, err
+	}
+	if entry.hasExpRef {
+		var extra []interface{}
+		extra = append(extra, intr)
+		resolvedArgs = append(extra, resolvedArgs...)
+	}
+	return entry.handler(resolvedArgs)
+}
+
+func jpfAbs(arguments []interface{}) (interface{}, error) {
+	num := arguments[0].(float64)
+	return math.Abs(num), nil
+}
+
+func jpfLength(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0]
+	if c, ok := arg.(string); ok {
+		return float64(utf8.RuneCountInString(c)), nil
+	} else if isSliceType(arg) {
+		v := reflect.ValueOf(arg)
+		return float64(v.Len()), nil
+	} else if c, ok := arg.(map[string]interface{}); ok {
+		return float64(len(c)), nil
+	}
+	return nil, errors.New("could not compute length()")
+}
+
+func jpfStartsWith(arguments []interface{}) (interface{}, error) {
+	search := arguments[0].(string)
+	prefix := arguments[1].(string)
+	return strings.HasPrefix(search, prefix), nil
+}
+
+func jpfAvg(arguments []interface{}) (interface{}, error) {
+	// We've already type checked the value so we can safely use
+	// type assertions.
+ args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
+	items, _ := toArrayStr(arguments[0])
+	if len(items) == 0 {
+		return nil, nil
+	}
+	if len(items) == 1 {
+		return items[0], nil
+	}
+	best := items[0]
+	for _, item := range items[1:] {
+		if item > best {
+			best = item
+		}
+	}
+	return best, nil
+}
+func jpfMerge(arguments []interface{}) (interface{}, error) {
+	final := make(map[string]interface{})
+	for _, m := range arguments {
+		mapped := m.(map[string]interface{})
+		for key, value := range mapped {
+			final[key] = value
+		}
+	}
+	return final, nil
+}
+func jpfMaxBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return nil, nil
+	} else if len(arr) == 1 {
+		return arr[0], nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	switch t := start.(type) {
+	case float64:
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(float64)
+			if !ok {
+				return nil, errors.New("invalid type, must be number")
+			}
+			if current > bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	case string:
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(string)
+			if !ok {
+				return nil, errors.New("invalid type, must be string")
+			}
+			if current > bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	default:
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfSum(arguments []interface{}) (interface{}, error) {
+	items, _ := toArrayNum(arguments[0])
+	sum := 0.0
+	for _, item := range items {
+		sum += item
+	}
+	return sum, nil
+}
+
+func jpfMin(arguments []interface{}) (interface{}, error) {
+	if items, ok := toArrayNum(arguments[0]); ok {
+		if len(items) == 0 {
+			return nil, nil
+		}
+		if len(items) == 1 {
+			return items[0], nil
+		}
+		best := items[0]
+		for _, item := range items[1:] {
+			if item < best {
+				best = item
+			}
+		}
+		return best, nil
+	}
+	items, _ := toArrayStr(arguments[0])
+	if len(items) == 0 {
+		return nil, nil
+	}
+	if len(items) == 1 {
+		return items[0], nil
+	}
+	best := items[0]
+	for _, item := range items[1:] {
+		if item < best {
+			best = item
+		}
+	}
+	return best, nil
+}
+
+func jpfMinBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return nil, nil
+	} else if len(arr) == 1 {
+		return arr[0], nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	if t, ok := start.(float64); ok {
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(float64)
+			if !ok {
+				return nil, errors.New("invalid type, must be number")
+			}
+			if current < bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	} else if t, ok := start.(string); ok {
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(string)
+			if !ok {
+				return nil, errors.New("invalid type, must be string")
+			}
+			if current < bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	} else {
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfType(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0]
+	if _, ok := arg.(float64); ok {
+		return "number", nil
+	}
+	if _, ok := arg.(string); ok {
+		return "string", nil
+	}
+	if _, ok := arg.([]interface{}); ok {
+		return "array", nil
+	}
+	if _, ok := arg.(map[string]interface{}); ok {
+		return "object", nil
+	}
+	if arg == nil {
+		return "null", nil
+	}
+	if arg == true || arg == false {
+		return "boolean", nil
+	}
+	return nil, errors.New("unknown type")
+}
+func jpfKeys(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0].(map[string]interface{})
+	collected := make([]interface{}, 0, len(arg))
+	for key := range arg {
+		collected = append(collected, key)
+	}
+	return collected, nil
+}
+func jpfValues(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0].(map[string]interface{})
+	collected := make([]interface{}, 0, len(arg))
+	for _, value := range arg {
+		collected = append(collected, value)
+	}
+	return collected, nil
+}
+func jpfSort(arguments []interface{}) (interface{}, error) {
+	if items, ok := toArrayNum(arguments[0]); ok {
+		d := sort.Float64Slice(items)
+		sort.Stable(d)
+		final := make([]interface{}, len(d))
+		for i, val := range d {
+			final[i] = val
+		}
+		return final, nil
+	}
+	// Otherwise we're dealing with sort()'ing strings.
+	items, _ := toArrayStr(arguments[0])
+	d := sort.StringSlice(items)
+	sort.Stable(d)
+	final := make([]interface{}, len(d))
+	for i, val := range d {
+		final[i] = val
+	}
+	return final, nil
+}
+func jpfSortBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return arr, nil
+	} else if len(arr) == 1 {
+		return arr, nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := start.(float64); ok {
+		sortable := &byExprFloat{intr, node, arr, false}
+		sort.Stable(sortable)
+		if sortable.hasError {
+			return nil, errors.New("error in sort_by comparison")
+		}
+		return arr, nil
+	} else if _, ok := start.(string); ok {
+		sortable := &byExprString{intr, node, arr, false}
+		sort.Stable(sortable)
+		if sortable.hasError {
+			return nil, errors.New("error in sort_by comparison")
+		}
+		return arr, nil
+	} else {
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfJoin(arguments []interface{}) (interface{}, error) {
+	sep := arguments[0].(string)
+	// We can't just do arguments[1].([]string), we have to
+	// manually convert each item to a string.
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 000000000..13c74604c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
+ rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, 
nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) + elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 000000000..817900c8f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. 
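+	// The two fields below track rune-by-rune scanning through the
+	// expression; back() rewinds exactly one rune by subtracting lastWidth.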
+ currentPos int // The current position in the string. + lastWidth int // The width of the current rune. This + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. +type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. +func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. +func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. 
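+			// For example, '.' becomes tDot and '@' becomes tCurrent;
+			// see the basicTokens map above.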
+ t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' { + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
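+		// Report the error at the end of the expression, which is where
+		// the closing quote was expected.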
+ return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tPipe, "|" -> tOr. +func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There's three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare rbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?' { + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number. 
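+	// The first rune (a digit or a leading '-') was already consumed by
+	// tokenize, so back up one rune before scanning the rest.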
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 000000000..4abc303ab --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
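+// Parsing is a Pratt-style (top-down operator precedence) parse driven by
+// the bindingPowers table above; for example, in "a.b | c" the pipe binds
+// loosest (tPipe has power 1, tDot has 40), so it is applied last.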
+func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expression: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + 
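+		// All arguments are parsed; consume the closing paren and build
+		// the call node. For "length(foo)", name is "length" and args
+		// holds a single ASTField node for "foo".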
if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, + }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: 
ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions []ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil 
{ + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go new file mode 100644 index 000000000..dae79cbdf --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go new file mode 100644 index 000000000..ddc1b7d7d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/util.go @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// IsFalse determines if an object is false based on the JMESPath spec. +// JMESPath defines false values to be any of: +// - An empty string array, or hash. +// - The boolean value false. 
+// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. +func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. 
+// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/vendor/github.com/kr/fs/LICENSE b/vendor/github.com/kr/fs/LICENSE new file mode 100644 index 000000000..744875676 --- /dev/null +++ b/vendor/github.com/kr/fs/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/kr/fs/Readme b/vendor/github.com/kr/fs/Readme new file mode 100644 index 000000000..c95e13fc8 --- /dev/null +++ b/vendor/github.com/kr/fs/Readme @@ -0,0 +1,3 @@ +Filesystem Package + +http://godoc.org/github.com/kr/fs diff --git a/vendor/github.com/kr/fs/filesystem.go b/vendor/github.com/kr/fs/filesystem.go new file mode 100644 index 000000000..f1c4805fb --- /dev/null +++ b/vendor/github.com/kr/fs/filesystem.go @@ -0,0 +1,36 @@ +package fs + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// FileSystem defines the methods of an abstract filesystem. +type FileSystem interface { + + // ReadDir reads the directory named by dirname and returns a + // list of directory entries. + ReadDir(dirname string) ([]os.FileInfo, error) + + // Lstat returns a FileInfo describing the named file. If the file is a + // symbolic link, the returned FileInfo describes the symbolic link. Lstat + // makes no attempt to follow the link. + Lstat(name string) (os.FileInfo, error) + + // Join joins any number of path elements into a single path, adding a + // separator if necessary. 
The result is Cleaned; in particular, all + // empty strings are ignored. + // + // The separator is FileSystem specific. + Join(elem ...string) string +} + +// fs represents a FileSystem provided by the os package. +type fs struct{} + +func (f *fs) ReadDir(dirname string) ([]os.FileInfo, error) { return ioutil.ReadDir(dirname) } + +func (f *fs) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } + +func (f *fs) Join(elem ...string) string { return filepath.Join(elem...) } diff --git a/vendor/github.com/kr/fs/walk.go b/vendor/github.com/kr/fs/walk.go new file mode 100644 index 000000000..6ffa1e0b2 --- /dev/null +++ b/vendor/github.com/kr/fs/walk.go @@ -0,0 +1,95 @@ +// Package fs provides filesystem-related functions. +package fs + +import ( + "os" +) + +// Walker provides a convenient interface for iterating over the +// descendants of a filesystem path. +// Successive calls to the Step method will step through each +// file or directory in the tree, including the root. The files +// are walked in lexical order, which makes the output deterministic +// but means that for very large directories Walker can be inefficient. +// Walker does not follow symbolic links. +type Walker struct { + fs FileSystem + cur item + stack []item + descend bool +} + +type item struct { + path string + info os.FileInfo + err error +} + +// Walk returns a new Walker rooted at root. +func Walk(root string) *Walker { + return WalkFS(root, new(fs)) +} + +// WalkFS returns a new Walker rooted at root on the FileSystem fs. +func WalkFS(root string, fs FileSystem) *Walker { + info, err := fs.Lstat(root) + return &Walker{ + fs: fs, + stack: []item{{root, info, err}}, + } +} + +// Step advances the Walker to the next file or directory, +// which will then be available through the Path, Stat, +// and Err methods. +// It returns false when the walk stops at the end of the tree. +func (w *Walker) Step() bool { + if w.descend && w.cur.err == nil && w.cur.info.IsDir() { + list, err := w.fs.ReadDir(w.cur.path) + if err != nil { + w.cur.err = err + w.stack = append(w.stack, w.cur) + } else { + for i := len(list) - 1; i >= 0; i-- { + path := w.fs.Join(w.cur.path, list[i].Name()) + w.stack = append(w.stack, item{path, list[i], nil}) + } + } + } + + if len(w.stack) == 0 { + return false + } + i := len(w.stack) - 1 + w.cur = w.stack[i] + w.stack = w.stack[:i] + w.descend = true + return true +} + +// Path returns the path to the most recent file or directory +// visited by a call to Step. It contains the argument to Walk +// as a prefix; that is, if Walk is called with "dir", which is +// a directory containing the file "a", Path will return "dir/a". +func (w *Walker) Path() string { + return w.cur.path +} + +// Stat returns info for the most recent file or directory +// visited by a call to Step. +func (w *Walker) Stat() os.FileInfo { + return w.cur.info +} + +// Err returns the error, if any, for the most recent attempt +// by Step to visit a file or directory. If a directory has +// an error, w will not descend into that directory. +func (w *Walker) Err() error { + return w.cur.err +} + +// SkipDir causes the currently visited directory to be skipped. +// If w is not on a directory, SkipDir has no effect. 
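+// SkipDir applies only to the most recent file or directory visited by
+// Step, and it must be called before Step is called again.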
+func (w *Walker) SkipDir() {
+	w.descend = false
+}
diff --git a/vendor/github.com/mattn/go-ieproxy/.gitignore b/vendor/github.com/mattn/go-ieproxy/.gitignore
new file mode 100644
index 000000000..bc8a670e0
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/.gitignore
@@ -0,0 +1 @@
+.idea/*
\ No newline at end of file
diff --git a/vendor/github.com/mattn/go-ieproxy/LICENSE b/vendor/github.com/mattn/go-ieproxy/LICENSE
new file mode 100644
index 000000000..7b7c0f855
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/LICENSE
@@ -0,0 +1,23 @@
+MIT License
+
+Copyright (c) 2014 mattn
+Copyright (c) 2017 oliverpool
+Copyright (c) 2019 Adele Reed
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mattn/go-ieproxy/README.md b/vendor/github.com/mattn/go-ieproxy/README.md
new file mode 100644
index 000000000..3e3b4759c
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/README.md
@@ -0,0 +1,51 @@
+# ieproxy
+
+Go package to detect the proxy settings on the Windows platform and on macOS.
+
+On Windows, the settings are first read via the [`WinHttpGetIEProxyConfigForCurrentUser` DLL call](https://docs.microsoft.com/en-us/windows/desktop/api/winhttp/nf-winhttp-winhttpgetieproxyconfigforcurrentuser), falling back to the registry (`CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Internet Settings`) if the DLL call fails.
+
+On macOS, the settings are read from the [`CFNetworkCopySystemProxySettings` method of CFNetwork](https://developer.apple.com/documentation/cfnetwork/1426754-cfnetworkcopysystemproxysettings?language=objc).
+
+For more information, take a look at the [documentation](https://godoc.org/github.com/mattn/go-ieproxy).
+
+## Methods
+
+You can obtain a `net/http` compatible proxy function using `ieproxy.GetProxyFunc()`, set environment variables using `ieproxy.OverrideEnvWithStaticProxy()` (though no automatic configuration is available this way), or obtain the proxy settings via `ieproxy.GetConf()`. A short sketch follows, and the table after it summarizes what each method supports.
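+
+For example, a client can be wired to the system proxy without touching the process-global `http.DefaultTransport` (a minimal sketch; the URL is just a placeholder):
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/mattn/go-ieproxy"
+)
+
+func main() {
+	// Resolve the proxy per request from the system configuration.
+	client := &http.Client{
+		Transport: &http.Transport{Proxy: ieproxy.GetProxyFunc()},
+	}
+	resp, err := client.Get("http://example.com")
+	if err != nil {
+		fmt.Println("request failed:", err)
+		return
+	}
+	defer resp.Body.Close()
+	fmt.Println("status:", resp.Status)
+}
+```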
+ +| Method | Supported configuration options: | +|----------------------------------------|-----------------------------------------------| +| `ieproxy.GetProxyFunc()` | Static, Specified script, and fully automatic | +| `ieproxy.OverrideEnvWithStaticProxy()` | Static | +| `ieproxy.GetConf()` | Depends on how you use it | + +## Examples + +### Using GetProxyFunc(): + +```go +func init() { + http.DefaultTransport.(*http.Transport).Proxy = ieproxy.GetProxyFunc() +} +``` + +GetProxyFunc acts as a middleman between `net/http` and `mattn/go-ieproxy` in order to select the correct proxy configuration based off the details supplied in the config. + +### Using OverrideEnvWithStaticProxy(): + +```go +func init() { + ieproxy.OverrideEnvWithStaticProxy() + http.DefaultTransport.(*http.Transport).Proxy = http.ProxyFromEnvironment +} +``` + +OverrideEnvWithStaticProxy overrides the relevant environment variables (`HTTP_PROXY`, `HTTPS_PROXY`, `NO_PROXY`) with the **static, manually configured** proxy details typically found in the registry. + +### Using GetConf(): + +```go +func main() { + conf := ieproxy.GetConf() + //Handle proxies how you want to. +} +``` diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy.go b/vendor/github.com/mattn/go-ieproxy/ieproxy.go new file mode 100644 index 000000000..0b5460bb8 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/ieproxy.go @@ -0,0 +1,56 @@ +// Package ieproxy is a utility to retrieve the proxy parameters (especially of Internet Explorer on windows) +// +// On windows, it gathers the parameters from the registry (regedit), while it uses env variable on other platforms +package ieproxy + +import "os" + +// ProxyConf gathers the configuration for proxy +type ProxyConf struct { + Static StaticProxyConf // static configuration + Automatic ProxyScriptConf // script configuration +} + +// StaticProxyConf contains the configuration for static proxy +type StaticProxyConf struct { + // Is the proxy active? + Active bool + // Proxy address for each scheme (http, https) + // "" (empty string) is the fallback proxy + Protocols map[string]string + // Addresses not to be browsed via the proxy (comma-separated, linux-like) + NoProxy string +} + +// ProxyScriptConf contains the configuration for automatic proxy +type ProxyScriptConf struct { + // Is the proxy active? + Active bool + // PreConfiguredURL of the .pac file. + // If this is empty and Active is true, auto-configuration should be assumed. + PreConfiguredURL string +} + +// GetConf retrieves the proxy configuration from the Windows Regedit +func GetConf() ProxyConf { + return getConf() +} + +// ReloadConf reloads the proxy configuration +func ReloadConf() ProxyConf { + return reloadConf() +} + +// OverrideEnvWithStaticProxy writes new values to the +// `http_proxy`, `https_proxy` and `no_proxy` environment variables. 
+// The values are taken from the Windows Regedit (should be called in `init()` function - see example) +func OverrideEnvWithStaticProxy() { + overrideEnvWithStaticProxy(GetConf(), os.Setenv) +} + +// FindProxyForURL computes the proxy for a given URL according to the pac file +func (psc *ProxyScriptConf) FindProxyForURL(URL string) string { + return psc.findProxyForURL(URL) +} + +type envSetter func(string, string) error diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy_darwin.go b/vendor/github.com/mattn/go-ieproxy/ieproxy_darwin.go new file mode 100644 index 000000000..e56a70354 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/ieproxy_darwin.go @@ -0,0 +1,126 @@ +//go:build !ios && !iossimulator +// +build !ios,!iossimulator + +package ieproxy + +/* +#cgo LDFLAGS: -framework CoreFoundation +#cgo LDFLAGS: -framework CFNetwork +#include +#include +*/ +import "C" + +import ( + "fmt" + "strings" + "sync" + "unsafe" +) + +var once sync.Once +var darwinProxyConf ProxyConf + +// GetConf retrieves the proxy configuration from the Windows Regedit +func getConf() ProxyConf { + once.Do(writeConf) + return darwinProxyConf +} + +// reloadConf forces a reload of the proxy configuration. +func reloadConf() ProxyConf { + writeConf() + return getConf() +} + +func cfStringGetGoString(cfStr C.CFStringRef) string { + retCString := (*C.char)(C.calloc(C.ulong(uint(128)), 1)) + defer C.free(unsafe.Pointer(retCString)) + + C.CFStringGetCString(cfStr, retCString, C.long(128), C.kCFStringEncodingUTF8) + return C.GoString(retCString) +} + +func cfNumberGetGoInt(cfNum C.CFNumberRef) int { + ret := 0 + C.CFNumberGetValue(cfNum, C.kCFNumberIntType, unsafe.Pointer(&ret)) + return ret +} + +func cfArrayGetGoStrings(cfArray C.CFArrayRef) []string { + var ret []string + for i := 0; i < int(C.CFArrayGetCount(cfArray)); i++ { + cfStr := C.CFStringRef(C.CFArrayGetValueAtIndex(cfArray, C.long(i))) + if unsafe.Pointer(cfStr) != C.NULL { + ret = append(ret, cfStringGetGoString(cfStr)) + } + } + return ret +} + +func writeConf() { + cfDictProxy := C.CFDictionaryRef(C.CFNetworkCopySystemProxySettings()) + defer C.CFRelease(C.CFTypeRef(cfDictProxy)) + darwinProxyConf = ProxyConf{} + + cfNumHttpEnable := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPEnable))) + if unsafe.Pointer(cfNumHttpEnable) != C.NULL && cfNumberGetGoInt(cfNumHttpEnable) > 0 { + darwinProxyConf.Static.Active = true + if darwinProxyConf.Static.Protocols == nil { + darwinProxyConf.Static.Protocols = make(map[string]string) + } + httpHost := C.CFStringRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPProxy))) + httpPort := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPPort))) + + httpProxy := fmt.Sprintf("%s:%d", cfStringGetGoString(httpHost), cfNumberGetGoInt(httpPort)) + darwinProxyConf.Static.Protocols["http"] = httpProxy + } + + cfNumHttpsEnable := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPSEnable))) + if unsafe.Pointer(cfNumHttpsEnable) != C.NULL && cfNumberGetGoInt(cfNumHttpsEnable) > 0 { + darwinProxyConf.Static.Active = true + if darwinProxyConf.Static.Protocols == nil { + darwinProxyConf.Static.Protocols = make(map[string]string) + } + httpsHost := C.CFStringRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPSProxy))) + httpsPort := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPSPort))) + + httpProxy := 
fmt.Sprintf("%s:%d", cfStringGetGoString(httpsHost), cfNumberGetGoInt(httpsPort)) + darwinProxyConf.Static.Protocols["https"] = httpProxy + } + + if darwinProxyConf.Static.Active { + cfArrayExceptionList := C.CFArrayRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesExceptionsList))) + if unsafe.Pointer(cfArrayExceptionList) != C.NULL { + exceptionList := cfArrayGetGoStrings(cfArrayExceptionList) + darwinProxyConf.Static.NoProxy = strings.Join(exceptionList, ",") + } + } + + cfNumPacEnable := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesProxyAutoConfigEnable))) + if unsafe.Pointer(cfNumPacEnable) != C.NULL && cfNumberGetGoInt(cfNumPacEnable) > 0 { + cfStringPac := C.CFStringRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesProxyAutoConfigURLString))) + if unsafe.Pointer(cfStringPac) != C.NULL { + pac := cfStringGetGoString(cfStringPac) + darwinProxyConf.Automatic.PreConfiguredURL = pac + darwinProxyConf.Automatic.Active = true + } + } +} + +// OverrideEnvWithStaticProxy writes new values to the +// http_proxy, https_proxy and no_proxy environment variables. +// The values are taken from the MacOS System Preferences. +func overrideEnvWithStaticProxy(conf ProxyConf, setenv envSetter) { + if conf.Static.Active { + for _, scheme := range []string{"http", "https"} { + url := conf.Static.Protocols[scheme] + if url != "" { + setenv(scheme+"_proxy", url) + } + } + if conf.Static.NoProxy != "" { + setenv("no_proxy", conf.Static.NoProxy) + } + } +} diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy_ios.go b/vendor/github.com/mattn/go-ieproxy/ieproxy_ios.go new file mode 100644 index 000000000..eda2f0a0e --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/ieproxy_ios.go @@ -0,0 +1,15 @@ +//go:build ios || iossimulator +// +build ios iossimulator + +package ieproxy + +func getConf() ProxyConf { + return ProxyConf{} +} + +func reloadConf() ProxyConf { + return getConf() +} + +func overrideEnvWithStaticProxy(pc ProxyConf, setenv envSetter) { +} diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go b/vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go new file mode 100644 index 000000000..65c54ecd1 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go @@ -0,0 +1,16 @@ +//go:build !windows && (!darwin || !cgo) +// +build !windows +// +build !darwin !cgo + +package ieproxy + +func getConf() ProxyConf { + return ProxyConf{} +} + +func reloadConf() ProxyConf { + return getConf() +} + +func overrideEnvWithStaticProxy(pc ProxyConf, setenv envSetter) { +} diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go b/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go new file mode 100644 index 000000000..7fd375017 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go @@ -0,0 +1,219 @@ +package ieproxy + +import ( + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/windows/registry" +) + +type regeditValues struct { + ProxyServer string + ProxyOverride string + ProxyEnable uint64 + AutoConfigURL string +} + +var once sync.Once +var windowsProxyConf ProxyConf + +// GetConf retrieves the proxy configuration from the Windows Regedit +func getConf() ProxyConf { + once.Do(writeConf) + return windowsProxyConf +} + +// reloadConf forces a reload of the proxy configuration from the Windows registry +func reloadConf() ProxyConf { + writeConf() + return getConf() +} + +func writeConf() { + proxy := "" + proxyByPass := "" + autoConfigUrl := "" + autoDetect := false + + // 
Try from IE first.
+	if ieCfg, err := getUserConfigFromWindowsSyscall(); err == nil {
+		defer globalFreeWrapper(ieCfg.lpszProxy)
+		defer globalFreeWrapper(ieCfg.lpszProxyBypass)
+		defer globalFreeWrapper(ieCfg.lpszAutoConfigUrl)
+
+		proxy = StringFromUTF16Ptr(ieCfg.lpszProxy)
+		proxyByPass = StringFromUTF16Ptr(ieCfg.lpszProxyBypass)
+		autoConfigUrl = StringFromUTF16Ptr(ieCfg.lpszAutoConfigUrl)
+		autoDetect = ieCfg.fAutoDetect
+	}
+
+	if proxy == "" && !autoDetect {
+		// Try the WinHTTP default proxy.
+		if defaultCfg, err := getDefaultProxyConfiguration(); err == nil {
+			defer globalFreeWrapper(defaultCfg.lpszProxy)
+			defer globalFreeWrapper(defaultCfg.lpszProxyBypass)
+
+			// Always set both of these; they are a pair, and it makes no sense to set one here and keep the other's value from above.
+			proxy = StringFromUTF16Ptr(defaultCfg.lpszProxy)
+			proxyByPass = StringFromUTF16Ptr(defaultCfg.lpszProxyBypass)
+		}
+	}
+
+	if proxy == "" && !autoDetect {
+		// Nothing was found via the syscalls; fall back to reading the IE settings from the registry.
+		regedit, _ := readRegedit() // If the registry read fails, parseRegedit sees zero values.
+		windowsProxyConf = parseRegedit(regedit)
+		return
+	}
+
+	// Populate the proxy settings.
+	windowsProxyConf = ProxyConf{
+		Static: StaticProxyConf{
+			Active: len(proxy) > 0,
+		},
+		Automatic: ProxyScriptConf{
+			Active: len(autoConfigUrl) > 0 || autoDetect,
+		},
+	}
+
+	if windowsProxyConf.Static.Active {
+		protocol := make(map[string]string)
+		for _, s := range strings.Split(proxy, ";") {
+			s = strings.TrimSpace(s)
+			if s == "" {
+				continue
+			}
+			pair := strings.SplitN(s, "=", 2)
+			if len(pair) > 1 {
+				protocol[pair[0]] = pair[1]
+			} else {
+				protocol[""] = pair[0]
+			}
+		}
+
+		windowsProxyConf.Static.Protocols = protocol
+		if len(proxyByPass) > 0 {
+			windowsProxyConf.Static.NoProxy = strings.Replace(proxyByPass, ";", ",", -1)
+		}
+	}
+
+	if windowsProxyConf.Automatic.Active {
+		windowsProxyConf.Automatic.PreConfiguredURL = autoConfigUrl
+	}
+}
+
+func getUserConfigFromWindowsSyscall() (*tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG, error) {
+	if err := winHttpGetIEProxyConfigForCurrentUser.Find(); err != nil {
+		return nil, err
+	}
+	p := new(tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG)
+	r, _, err := winHttpGetIEProxyConfigForCurrentUser.Call(uintptr(unsafe.Pointer(p)))
+	if rTrue(r) {
+		return p, nil
+	}
+	return nil, err
+}
+
+func getDefaultProxyConfiguration() (*tWINHTTP_PROXY_INFO, error) {
+	pInfo := new(tWINHTTP_PROXY_INFO)
+	if err := winHttpGetDefaultProxyConfiguration.Find(); err != nil {
+		return nil, err
+	}
+	r, _, err := winHttpGetDefaultProxyConfiguration.Call(uintptr(unsafe.Pointer(pInfo)))
+	if rTrue(r) {
+		return pInfo, nil
+	}
+	return nil, err
+}
+
+// OverrideEnvWithStaticProxy writes new values to the
+// http_proxy, https_proxy and no_proxy environment variables.
+// The values are taken from the Windows registry (this should be called from an init() function).
+func overrideEnvWithStaticProxy(conf ProxyConf, setenv envSetter) {
+	if conf.Static.Active {
+		for _, scheme := range []string{"http", "https"} {
+			url := mapFallback(scheme, "", conf.Static.Protocols)
+			setenv(scheme+"_proxy", url)
+		}
+		if conf.Static.NoProxy != "" {
+			setenv("no_proxy", conf.Static.NoProxy)
+		}
+	}
+}
+
+func parseRegedit(regedit regeditValues) ProxyConf {
+	protocol := make(map[string]string)
+	for _, s := range strings.Split(regedit.ProxyServer, ";") {
+		if s == "" {
+			continue
+		}
+		pair := strings.SplitN(s, "=", 2)
+		if len(pair) > 1 {
+			protocol[pair[0]] = pair[1]
+		} else {
+			protocol[""] = pair[0]
+		}
+	}
+
+	return ProxyConf{
+		Static: StaticProxyConf{
+			Active:    regedit.ProxyEnable > 0,
+			Protocols: protocol,
+			NoProxy:   strings.Replace(regedit.ProxyOverride, ";", ",", -1), // to match the linux style
+		},
+		Automatic: ProxyScriptConf{
+			Active:           regedit.AutoConfigURL != "",
+			PreConfiguredURL: regedit.AutoConfigURL,
+		},
+	}
+}
+
+func readRegedit() (values regeditValues, err error) {
+	var proxySettingsPerUser uint64 = 1 // 1 is the default and means the current user's settings are used
+	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `Software\Policies\Microsoft\Windows\CurrentVersion\Internet Settings`, registry.QUERY_VALUE)
+	if err == nil {
+		// The intermediate variable tempPrxUsrSettings is needed because the Go
+		// method GetIntegerValue sets the value to zero even when it fails.
+		tempPrxUsrSettings, _, err := k.GetIntegerValue("ProxySettingsPerUser")
+		if err == nil {
+			// Use the value of tempPrxUsrSettings only on success.
+			proxySettingsPerUser = tempPrxUsrSettings
+		}
+		k.Close()
+	}
+
+	var hkey registry.Key
+	if proxySettingsPerUser == 0 {
+		hkey = registry.LOCAL_MACHINE
+	} else {
+		hkey = registry.CURRENT_USER
+	}
+
+	k, err = registry.OpenKey(hkey, `Software\Microsoft\Windows\CurrentVersion\Internet Settings`, registry.QUERY_VALUE)
+	if err != nil {
+		return
+	}
+	defer k.Close()
+
+	values.ProxyServer, _, err = k.GetStringValue("ProxyServer")
+	if err != nil && err != registry.ErrNotExist {
+		return
+	}
+	values.ProxyOverride, _, err = k.GetStringValue("ProxyOverride")
+	if err != nil && err != registry.ErrNotExist {
+		return
+	}
+
+	values.ProxyEnable, _, err = k.GetIntegerValue("ProxyEnable")
+	if err != nil && err != registry.ErrNotExist {
+		return
+	}
+
+	values.AutoConfigURL, _, err = k.GetStringValue("AutoConfigURL")
+	if err != nil && err != registry.ErrNotExist {
+		return
+	}
+	err = nil
+	return
+}
diff --git a/vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go b/vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go
new file mode 100644
index 000000000..30ebbd22a
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go
@@ -0,0 +1,19 @@
+package ieproxy
+
+import (
+	"golang.org/x/sys/windows"
+	"unsafe"
+)
+
+var kernel32 = windows.NewLazySystemDLL("kernel32.dll")
+var globalFree = kernel32.NewProc("GlobalFree")
+
+func globalFreeWrapper(ptr *uint16) {
+	if ptr != nil {
+		_, _, _ = globalFree.Call(uintptr(unsafe.Pointer(ptr)))
+	}
+}
+
+func rTrue(r uintptr) bool {
+	return r == 1
+}
diff --git a/vendor/github.com/mattn/go-ieproxy/pac_darwin.go b/vendor/github.com/mattn/go-ieproxy/pac_darwin.go
new file mode 100644
index 000000000..a8bf90e94
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/pac_darwin.go
@@ -0,0 +1,141 @@
+package ieproxy
+
+/*
+#cgo LDFLAGS: -framework CoreFoundation
+#cgo LDFLAGS: -framework CFNetwork
+#include +#include + +#define STR_LEN 128 + +void proxyAutoConfCallback(void* client, CFArrayRef proxies, CFErrorRef error) { + CFTypeRef* result_ptr = (CFTypeRef*)client; + if (error != NULL) { + *result_ptr = CFRetain(error); + } else { + *result_ptr = CFRetain(proxies); + } + CFRunLoopStop(CFRunLoopGetCurrent()); +} + +int intCFNumber(CFNumberRef num) { + int ret; + CFNumberGetValue(num, kCFNumberIntType, &ret); + return ret; +} + +char* _getProxyUrlFromPac(char* pac, char* reqCs) { + char* retCString = (char*)calloc(STR_LEN, sizeof(char)); + + CFStringRef reqStr = CFStringCreateWithCString(NULL, reqCs, kCFStringEncodingUTF8); + CFStringRef pacStr = CFStringCreateWithCString(NULL, pac, kCFStringEncodingUTF8); + CFURLRef pacUrl = CFURLCreateWithString(NULL, pacStr, NULL); + CFURLRef reqUrl = CFURLCreateWithString(NULL, reqStr, NULL); + + CFTypeRef result = NULL; + CFStreamClientContext context = { 0, &result, NULL, NULL, NULL }; + CFRunLoopSourceRef runloop_src = CFNetworkExecuteProxyAutoConfigurationURL(pacUrl, reqUrl, proxyAutoConfCallback, &context); + + if (runloop_src) { + const CFStringRef private_runloop_mode = CFSTR("go-ieproxy"); + CFRunLoopAddSource(CFRunLoopGetCurrent(), runloop_src, private_runloop_mode); + CFRunLoopRunInMode(private_runloop_mode, DBL_MAX, false); + CFRunLoopRemoveSource(CFRunLoopGetCurrent(), runloop_src, kCFRunLoopCommonModes); + + if (CFGetTypeID(result) == CFArrayGetTypeID()) { + CFArrayRef resultArray = (CFTypeRef)result; + if (CFArrayGetCount(resultArray) > 0) { + CFDictionaryRef pxy = (CFDictionaryRef)CFArrayGetValueAtIndex(resultArray, 0); + CFStringRef pxyType = CFDictionaryGetValue(pxy, kCFProxyTypeKey); + + if (CFEqual(pxyType, kCFProxyTypeNone)) { + // noop + } + + if (CFEqual(pxyType, kCFProxyTypeHTTP)) { + CFStringRef host = (CFStringRef)CFDictionaryGetValue(pxy, kCFProxyHostNameKey); + CFNumberRef port = (CFNumberRef)CFDictionaryGetValue(pxy, kCFProxyPortNumberKey); + + char host_str[STR_LEN - 16]; + CFStringGetCString(host, host_str, STR_LEN - 16, kCFStringEncodingUTF8); + + int port_int = 80; + if (port) { + CFNumberGetValue(port, kCFNumberIntType, &port_int); + } + + sprintf(retCString, "%s:%d", host_str, port_int); + } + } + } else { + // error + } + } + + CFRelease(result); + CFRelease(reqStr); + CFRelease(reqUrl); + CFRelease(pacStr); + CFRelease(pacUrl); + return retCString; +} + +char* _getPacUrl() { + char* retCString = (char*)calloc(STR_LEN, sizeof(char)); + CFDictionaryRef proxyDict = CFNetworkCopySystemProxySettings(); + CFNumberRef pacEnable = (CFNumberRef)CFDictionaryGetValue(proxyDict, kCFNetworkProxiesProxyAutoConfigEnable); + + if (pacEnable && intCFNumber(pacEnable)) { + CFStringRef pacUrlStr = (CFStringRef)CFDictionaryGetValue(proxyDict, kCFNetworkProxiesProxyAutoConfigURLString); + if (pacUrlStr) { + CFStringGetCString(pacUrlStr, retCString, STR_LEN, kCFStringEncodingUTF8); + } + } + + CFRelease(proxyDict); + return retCString; +} + +*/ +import "C" +import ( + "net/url" + "unsafe" +) + +func (psc *ProxyScriptConf) findProxyForURL(URL string) string { + if !psc.Active { + return "" + } + proxy := getProxyForURL(psc.PreConfiguredURL, URL) + return proxy +} + +func getProxyForURL(pacFileURL, targetURL string) string { + if pacFileURL == "" { + pacFileURL = getPacUrl() + } + if pacFileURL == "" { + return "" + } + if u, err := url.Parse(pacFileURL); err != nil || u.Scheme == "" { + return "" + } + + csUrl := C.CString(targetURL) + csPac := C.CString(pacFileURL) + csRet := C._getProxyUrlFromPac(csPac, csUrl) + + defer 
C.free(unsafe.Pointer(csUrl)) + defer C.free(unsafe.Pointer(csPac)) + defer C.free(unsafe.Pointer(csRet)) + + return C.GoString(csRet) +} + +func getPacUrl() string { + csRet := C._getPacUrl() + + defer C.free(unsafe.Pointer(csRet)) + return C.GoString(csRet) +} diff --git a/vendor/github.com/mattn/go-ieproxy/pac_unix.go b/vendor/github.com/mattn/go-ieproxy/pac_unix.go new file mode 100644 index 000000000..3a659eb6d --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/pac_unix.go @@ -0,0 +1,9 @@ +//go:build !windows && (!darwin || !cgo) +// +build !windows +// +build !darwin !cgo + +package ieproxy + +func (psc *ProxyScriptConf) findProxyForURL(URL string) string { + return "" +} diff --git a/vendor/github.com/mattn/go-ieproxy/pac_windows.go b/vendor/github.com/mattn/go-ieproxy/pac_windows.go new file mode 100644 index 000000000..6a2ee6778 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/pac_windows.go @@ -0,0 +1,72 @@ +package ieproxy + +import ( + "strings" + "syscall" + "unsafe" +) + +func (psc *ProxyScriptConf) findProxyForURL(URL string) string { + if !psc.Active { + return "" + } + proxy, _ := getProxyForURL(psc.PreConfiguredURL, URL) + i := strings.Index(proxy, ";") + if i >= 0 { + return proxy[:i] + } + return proxy +} + +func getProxyForURL(pacfileURL, URL string) (string, error) { + pacfileURLPtr, err := syscall.UTF16PtrFromString(pacfileURL) + if err != nil { + return "", err + } + URLPtr, err := syscall.UTF16PtrFromString(URL) + if err != nil { + return "", err + } + + handle, _, err := winHttpOpen.Call(0, 0, 0, 0, 0) + if handle == 0 { + return "", err + } + defer winHttpCloseHandle.Call(handle) + + dwFlags := fWINHTTP_AUTOPROXY_CONFIG_URL + dwAutoDetectFlags := autoDetectFlag(0) + pfURLptr := pacfileURLPtr + + if pacfileURL == "" { + dwFlags = fWINHTTP_AUTOPROXY_AUTO_DETECT + dwAutoDetectFlags = fWINHTTP_AUTO_DETECT_TYPE_DNS_A | fWINHTTP_AUTO_DETECT_TYPE_DHCP + pfURLptr = nil + } + + options := tWINHTTP_AUTOPROXY_OPTIONS{ + dwFlags: dwFlags, // adding cache might cause issues: https://github.com/mattn/go-ieproxy/issues/6 + dwAutoDetectFlags: dwAutoDetectFlags, + lpszAutoConfigUrl: pfURLptr, + lpvReserved: nil, + dwReserved: 0, + fAutoLogonIfChallenged: true, // may not be optimal https://msdn.microsoft.com/en-us/library/windows/desktop/aa383153(v=vs.85).aspx + } // lpszProxyBypass isn't used as this only executes in cases where there (may) be a pac file (autodetect can fail), where lpszProxyBypass couldn't be returned. + // in the case that autodetect fails and no pre-specified pacfile is present, no proxy is returned. 
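+	// WinHttpGetProxyForUrl (called below) evaluates the PAC script, or runs
+	// autodetection, for the target URL and fills the WINHTTP_PROXY_INFO
+	// struct; the wide strings it allocates are released by the deferred
+	// globalFreeWrapper calls after use.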
+ + info := new(tWINHTTP_PROXY_INFO) + + ret, _, err := winHttpGetProxyForURL.Call( + handle, + uintptr(unsafe.Pointer(URLPtr)), + uintptr(unsafe.Pointer(&options)), + uintptr(unsafe.Pointer(info)), + ) + if ret > 0 { + err = nil + } + + defer globalFreeWrapper(info.lpszProxyBypass) + defer globalFreeWrapper(info.lpszProxy) + return StringFromUTF16Ptr(info.lpszProxy), err +} diff --git a/vendor/github.com/mattn/go-ieproxy/proxy_middleman.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman.go new file mode 100644 index 000000000..b2ff9147b --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/proxy_middleman.go @@ -0,0 +1,11 @@ +package ieproxy + +import ( + "net/http" + "net/url" +) + +// GetProxyFunc is a forwarder for the OS-Exclusive proxyMiddleman_os.go files +func GetProxyFunc() func(*http.Request) (*url.URL, error) { + return proxyMiddleman() +} diff --git a/vendor/github.com/mattn/go-ieproxy/proxy_middleman_darwin.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_darwin.go new file mode 100644 index 000000000..6ad98b288 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_darwin.go @@ -0,0 +1,46 @@ +//go:build !ios && !iossimulator +// +build !ios,!iossimulator + +package ieproxy + +import ( + "net/http" + "net/url" + + "golang.org/x/net/http/httpproxy" +) + +func proxyMiddleman() func(req *http.Request) (i *url.URL, e error) { + // Get the proxy configuration + conf := GetConf() + envCfg := httpproxy.FromEnvironment() + + if envCfg.HTTPProxy != "" || envCfg.HTTPSProxy != "" { + // If the user manually specifies environment variables, prefer those over the MacOS config. + return http.ProxyFromEnvironment + } + + return func(req *http.Request) (i *url.URL, e error) { + if conf.Automatic.Active { + host := conf.Automatic.FindProxyForURL(req.URL.String()) + if host != "" { + return &url.URL{Host: host}, nil + } + } + if conf.Static.Active { + return staticProxy(conf, req) + } + // Should return no proxy; fallthrough. + return http.ProxyFromEnvironment(req) + } +} + +func staticProxy(conf ProxyConf, req *http.Request) (i *url.URL, e error) { + // If static proxy obtaining is specified + proxy := httpproxy.Config{ + HTTPSProxy: conf.Static.Protocols["https"], + HTTPProxy: conf.Static.Protocols["http"], + NoProxy: conf.Static.NoProxy, + } + return proxy.ProxyFunc()(req.URL) +} diff --git a/vendor/github.com/mattn/go-ieproxy/proxy_middleman_ios.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_ios.go new file mode 100644 index 000000000..5308e5552 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_ios.go @@ -0,0 +1,14 @@ +//go:build ios || iossimulator +// +build ios iossimulator + +package ieproxy + +import ( + "net/http" + "net/url" +) + +func proxyMiddleman() func(req *http.Request) (i *url.URL, e error) { + // Fallthrough to ProxyFromEnvironment on all other OSes. + return http.ProxyFromEnvironment +} diff --git a/vendor/github.com/mattn/go-ieproxy/proxy_middleman_unix.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_unix.go new file mode 100644 index 000000000..fe227a12e --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_unix.go @@ -0,0 +1,14 @@ +//go:build !windows && !darwin +// +build !windows,!darwin + +package ieproxy + +import ( + "net/http" + "net/url" +) + +func proxyMiddleman() func(req *http.Request) (i *url.URL, e error) { + // Fallthrough to ProxyFromEnvironment on all other OSes. 
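+	//
+	// A minimal caller-side sketch (hypothetical application code, not part
+	// of this package): plug GetProxyFunc into an http.Transport so every
+	// request resolves its proxy via the OS configuration:
+	//
+	//	tr := &http.Transport{Proxy: ieproxy.GetProxyFunc()}
+	//	client := &http.Client{Transport: tr}
+	//	resp, err := client.Get("https://example.com/")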
+ return http.ProxyFromEnvironment +} diff --git a/vendor/github.com/mattn/go-ieproxy/proxy_middleman_windows.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_windows.go new file mode 100644 index 000000000..7d314dbf9 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_windows.go @@ -0,0 +1,52 @@ +package ieproxy + +import ( + "net/http" + "net/url" + + "golang.org/x/net/http/httpproxy" +) + +func proxyMiddleman() func(req *http.Request) (i *url.URL, e error) { + // Get the proxy configuration + conf := GetConf() + envcfg := httpproxy.FromEnvironment() + + if envcfg.HTTPProxy != "" || envcfg.HTTPSProxy != "" { + // If the user manually specifies environment variables, prefer those over the Windows config. + return http.ProxyFromEnvironment + } + + return func(req *http.Request) (i *url.URL, e error) { + if conf.Automatic.Active { + host := conf.Automatic.FindProxyForURL(req.URL.String()) + if host != "" { + return &url.URL{Host: host}, nil + } + } + if conf.Static.Active { + return staticProxy(conf, req) + } + // Should return no proxy; fallthrough. + return http.ProxyFromEnvironment(req) + } +} + +func staticProxy(conf ProxyConf, req *http.Request) (i *url.URL, e error) { + // If static proxy obtaining is specified + prox := httpproxy.Config{ + HTTPSProxy: mapFallback("https", "", conf.Static.Protocols), + HTTPProxy: mapFallback("http", "", conf.Static.Protocols), + NoProxy: conf.Static.NoProxy, + } + return prox.ProxyFunc()(req.URL) +} + +// Return oKey or fbKey if oKey doesn't exist in the map. +func mapFallback(oKey, fbKey string, m map[string]string) string { + if v, ok := m[oKey]; ok { + return v + } else { + return m[fbKey] + } +} diff --git a/vendor/github.com/mattn/go-ieproxy/utils.go b/vendor/github.com/mattn/go-ieproxy/utils.go new file mode 100644 index 000000000..353b23112 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/utils.go @@ -0,0 +1,23 @@ +package ieproxy + +import ( + "unicode/utf16" + "unsafe" +) + +// StringFromUTF16Ptr converts a *uint16 C string to a Go String +func StringFromUTF16Ptr(s *uint16) string { + if s == nil { + return "" + } + + p := (*[1<<30 - 1]uint16)(unsafe.Pointer(s)) + + // find the string length + sz := 0 + for p[sz] != 0 { + sz++ + } + + return string(utf16.Decode(p[:sz:sz])) +} diff --git a/vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go b/vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go new file mode 100644 index 000000000..4d3b16778 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go @@ -0,0 +1,51 @@ +package ieproxy + +import "golang.org/x/sys/windows" + +var winHttp = windows.NewLazySystemDLL("winhttp.dll") +var winHttpGetProxyForURL = winHttp.NewProc("WinHttpGetProxyForUrl") +var winHttpOpen = winHttp.NewProc("WinHttpOpen") +var winHttpCloseHandle = winHttp.NewProc("WinHttpCloseHandle") +var winHttpGetIEProxyConfigForCurrentUser = winHttp.NewProc("WinHttpGetIEProxyConfigForCurrentUser") +var winHttpGetDefaultProxyConfiguration = winHttp.NewProc("WinHttpGetDefaultProxyConfiguration") + +type tWINHTTP_AUTOPROXY_OPTIONS struct { + dwFlags autoProxyFlag + dwAutoDetectFlags autoDetectFlag + lpszAutoConfigUrl *uint16 + lpvReserved *uint16 + dwReserved uint32 + fAutoLogonIfChallenged bool +} +type autoProxyFlag uint32 + +const ( + fWINHTTP_AUTOPROXY_AUTO_DETECT = autoProxyFlag(0x00000001) + fWINHTTP_AUTOPROXY_CONFIG_URL = autoProxyFlag(0x00000002) + fWINHTTP_AUTOPROXY_NO_CACHE_CLIENT = autoProxyFlag(0x00080000) + fWINHTTP_AUTOPROXY_NO_CACHE_SVC = 
autoProxyFlag(0x00100000) + fWINHTTP_AUTOPROXY_NO_DIRECTACCESS = autoProxyFlag(0x00040000) + fWINHTTP_AUTOPROXY_RUN_INPROCESS = autoProxyFlag(0x00010000) + fWINHTTP_AUTOPROXY_RUN_OUTPROCESS_ONLY = autoProxyFlag(0x00020000) + fWINHTTP_AUTOPROXY_SORT_RESULTS = autoProxyFlag(0x00400000) +) + +type autoDetectFlag uint32 + +const ( + fWINHTTP_AUTO_DETECT_TYPE_DHCP = autoDetectFlag(0x00000001) + fWINHTTP_AUTO_DETECT_TYPE_DNS_A = autoDetectFlag(0x00000002) +) + +type tWINHTTP_PROXY_INFO struct { + dwAccessType uint32 + lpszProxy *uint16 + lpszProxyBypass *uint16 +} + +type tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG struct { + fAutoDetect bool + lpszAutoConfigUrl *uint16 + lpszProxy *uint16 + lpszProxyBypass *uint16 +} diff --git a/vendor/github.com/pkg/sftp/.gitignore b/vendor/github.com/pkg/sftp/.gitignore new file mode 100644 index 000000000..caf2dca22 --- /dev/null +++ b/vendor/github.com/pkg/sftp/.gitignore @@ -0,0 +1,10 @@ +.*.swo +.*.swp + +server_standalone/server_standalone + +examples/*/id_rsa +examples/*/id_rsa.pub + +memprofile.out +memprofile.svg diff --git a/vendor/github.com/pkg/sftp/CONTRIBUTORS b/vendor/github.com/pkg/sftp/CONTRIBUTORS new file mode 100644 index 000000000..5c7196ae6 --- /dev/null +++ b/vendor/github.com/pkg/sftp/CONTRIBUTORS @@ -0,0 +1,3 @@ +Dave Cheney +Saulius Gurklys +John Eikenberry diff --git a/vendor/github.com/pkg/sftp/LICENSE b/vendor/github.com/pkg/sftp/LICENSE new file mode 100644 index 000000000..b7b53921e --- /dev/null +++ b/vendor/github.com/pkg/sftp/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) 2013, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/sftp/Makefile b/vendor/github.com/pkg/sftp/Makefile new file mode 100644 index 000000000..4d3a00799 --- /dev/null +++ b/vendor/github.com/pkg/sftp/Makefile @@ -0,0 +1,27 @@ +.PHONY: integration integration_w_race benchmark + +integration: + go test -integration -v ./... + go test -testserver -v ./... + go test -integration -testserver -v ./... + go test -integration -allocator -v ./... + go test -testserver -allocator -v ./... + go test -integration -testserver -allocator -v ./... + +integration_w_race: + go test -race -integration -v ./... + go test -race -testserver -v ./... + go test -race -integration -testserver -v ./... + go test -race -integration -allocator -v ./... 
+ go test -race -testserver -allocator -v ./... + go test -race -integration -allocator -testserver -v ./... + +COUNT ?= 1 +BENCHMARK_PATTERN ?= "." + +benchmark: + go test -integration -run=NONE -bench=$(BENCHMARK_PATTERN) -benchmem -count=$(COUNT) + +benchmark_w_memprofile: + go test -integration -run=NONE -bench=$(BENCHMARK_PATTERN) -benchmem -count=$(COUNT) -memprofile memprofile.out + go tool pprof -svg -output=memprofile.svg memprofile.out diff --git a/vendor/github.com/pkg/sftp/README.md b/vendor/github.com/pkg/sftp/README.md new file mode 100644 index 000000000..5e78cd396 --- /dev/null +++ b/vendor/github.com/pkg/sftp/README.md @@ -0,0 +1,44 @@ +sftp +---- + +The `sftp` package provides support for file system operations on remote ssh +servers using the SFTP subsystem. It also implements an SFTP server for serving +files from the filesystem. + +![CI Status](https://github.com/pkg/sftp/workflows/CI/badge.svg?branch=master&event=push) [![Go Reference](https://pkg.go.dev/badge/github.com/pkg/sftp.svg)](https://pkg.go.dev/github.com/pkg/sftp) + +usage and examples +------------------ + +See [https://pkg.go.dev/github.com/pkg/sftp](https://pkg.go.dev/github.com/pkg/sftp) for +examples and usage. + +The basic operation of the package mirrors the facilities of the +[os](http://golang.org/pkg/os) package. + +The Walker interface for directory traversal is heavily inspired by Keith +Rarick's [fs](https://pkg.go.dev/github.com/kr/fs) package. + +roadmap +------- + +* There is way too much duplication in the Client methods. If there was an + unmarshal(interface{}) method this would reduce a heap of the duplication. + +contributing +------------ + +We welcome pull requests, bug fixes and issue reports. + +Before proposing a large change, first please discuss your change by raising an +issue. + +For API/code bugs, please include a small, self contained code example to +reproduce the issue. For pull requests, remember test coverage. + +We try to handle issues and pull requests with a 0 open philosophy. That means +we will try to address the submission as soon as possible and will work toward +a resolution. If progress can no longer be made (eg. unreproducible bug) or +stops (eg. unresponsive submitter), we will close the bug. + +Thanks. diff --git a/vendor/github.com/pkg/sftp/allocator.go b/vendor/github.com/pkg/sftp/allocator.go new file mode 100644 index 000000000..3e67e5433 --- /dev/null +++ b/vendor/github.com/pkg/sftp/allocator.go @@ -0,0 +1,96 @@ +package sftp + +import ( + "sync" +) + +type allocator struct { + sync.Mutex + available [][]byte + // map key is the request order + used map[uint32][][]byte +} + +func newAllocator() *allocator { + return &allocator{ + // micro optimization: initialize available pages with an initial capacity + available: make([][]byte, 0, SftpServerWorkerCount*2), + used: make(map[uint32][][]byte), + } +} + +// GetPage returns a previously allocated and unused []byte or create a new one. +// The slice have a fixed size = maxMsgLength, this value is suitable for both +// receiving new packets and reading the files to serve +func (a *allocator) GetPage(requestOrderID uint32) []byte { + a.Lock() + defer a.Unlock() + + var result []byte + + // get an available page and remove it from the available ones. 
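+	// Reuse is LIFO: take the most recently released page from the end of
+	// the stack, and nil out the slot so the backing array does not keep
+	// the buffer alive.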
+ if len(a.available) > 0 { + truncLength := len(a.available) - 1 + result = a.available[truncLength] + + a.available[truncLength] = nil // clear out the internal pointer + a.available = a.available[:truncLength] // truncate the slice + } + + // no preallocated slice found, just allocate a new one + if result == nil { + result = make([]byte, maxMsgLength) + } + + // put result in used pages + a.used[requestOrderID] = append(a.used[requestOrderID], result) + + return result +} + +// ReleasePages marks unused all pages in use for the given requestID +func (a *allocator) ReleasePages(requestOrderID uint32) { + a.Lock() + defer a.Unlock() + + if used := a.used[requestOrderID]; len(used) > 0 { + a.available = append(a.available, used...) + } + delete(a.used, requestOrderID) +} + +// Free removes all the used and available pages. +// Call this method when the allocator is not needed anymore +func (a *allocator) Free() { + a.Lock() + defer a.Unlock() + + a.available = nil + a.used = make(map[uint32][][]byte) +} + +func (a *allocator) countUsedPages() int { + a.Lock() + defer a.Unlock() + + num := 0 + for _, p := range a.used { + num += len(p) + } + return num +} + +func (a *allocator) countAvailablePages() int { + a.Lock() + defer a.Unlock() + + return len(a.available) +} + +func (a *allocator) isRequestOrderIDUsed(requestOrderID uint32) bool { + a.Lock() + defer a.Unlock() + + _, ok := a.used[requestOrderID] + return ok +} diff --git a/vendor/github.com/pkg/sftp/attrs.go b/vendor/github.com/pkg/sftp/attrs.go new file mode 100644 index 000000000..758cd4ff9 --- /dev/null +++ b/vendor/github.com/pkg/sftp/attrs.go @@ -0,0 +1,121 @@ +package sftp + +// ssh_FXP_ATTRS support +// see https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-5 + +import ( + "os" + "time" +) + +const ( + sshFileXferAttrSize = 0x00000001 + sshFileXferAttrUIDGID = 0x00000002 + sshFileXferAttrPermissions = 0x00000004 + sshFileXferAttrACmodTime = 0x00000008 + sshFileXferAttrExtended = 0x80000000 + + sshFileXferAttrAll = sshFileXferAttrSize | sshFileXferAttrUIDGID | sshFileXferAttrPermissions | + sshFileXferAttrACmodTime | sshFileXferAttrExtended +) + +// fileInfo is an artificial type designed to satisfy os.FileInfo. +type fileInfo struct { + name string + stat *FileStat +} + +// Name returns the base name of the file. +func (fi *fileInfo) Name() string { return fi.name } + +// Size returns the length in bytes for regular files; system-dependent for others. +func (fi *fileInfo) Size() int64 { return int64(fi.stat.Size) } + +// Mode returns file mode bits. +func (fi *fileInfo) Mode() os.FileMode { return toFileMode(fi.stat.Mode) } + +// ModTime returns the last modification time of the file. +func (fi *fileInfo) ModTime() time.Time { return time.Unix(int64(fi.stat.Mtime), 0) } + +// IsDir returns true if the file is a directory. +func (fi *fileInfo) IsDir() bool { return fi.Mode().IsDir() } + +func (fi *fileInfo) Sys() interface{} { return fi.stat } + +// FileStat holds the original unmarshalled values from a call to READDIR or +// *STAT. It is exported for the purposes of accessing the raw values via +// os.FileInfo.Sys(). It is also used server side to store the unmarshalled +// values for SetStat. +type FileStat struct { + Size uint64 + Mode uint32 + Mtime uint32 + Atime uint32 + UID uint32 + GID uint32 + Extended []StatExtended +} + +// StatExtended contains additional, extended information for a FileStat. 
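+// The fields correspond to the extended_type / extended_data string pairs
+// defined in section 5 of draft-ietf-secsh-filexfer-02.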
+type StatExtended struct { + ExtType string + ExtData string +} + +func fileInfoFromStat(stat *FileStat, name string) os.FileInfo { + return &fileInfo{ + name: name, + stat: stat, + } +} + +// FileInfoUidGid extends os.FileInfo and adds callbacks for Uid and Gid retrieval, +// as an alternative to *syscall.Stat_t objects on unix systems. +type FileInfoUidGid interface { + os.FileInfo + Uid() uint32 + Gid() uint32 +} + +// FileInfoUidGid extends os.FileInfo and adds a callbacks for extended data retrieval. +type FileInfoExtendedData interface { + os.FileInfo + Extended() []StatExtended +} + +func fileStatFromInfo(fi os.FileInfo) (uint32, *FileStat) { + mtime := fi.ModTime().Unix() + atime := mtime + var flags uint32 = sshFileXferAttrSize | + sshFileXferAttrPermissions | + sshFileXferAttrACmodTime + + fileStat := &FileStat{ + Size: uint64(fi.Size()), + Mode: fromFileMode(fi.Mode()), + Mtime: uint32(mtime), + Atime: uint32(atime), + } + + // os specific file stat decoding + fileStatFromInfoOs(fi, &flags, fileStat) + + // The call above will include the sshFileXferAttrUIDGID in case + // the os.FileInfo can be casted to *syscall.Stat_t on unix. + // If fi implements FileInfoUidGid, retrieve Uid, Gid from it instead. + if fiExt, ok := fi.(FileInfoUidGid); ok { + flags |= sshFileXferAttrUIDGID + fileStat.UID = fiExt.Uid() + fileStat.GID = fiExt.Gid() + } + + // if fi implements FileInfoExtendedData, retrieve extended data from it + if fiExt, ok := fi.(FileInfoExtendedData); ok { + fileStat.Extended = fiExt.Extended() + if len(fileStat.Extended) > 0 { + flags |= sshFileXferAttrExtended + } + } + + return flags, fileStat +} diff --git a/vendor/github.com/pkg/sftp/attrs_stubs.go b/vendor/github.com/pkg/sftp/attrs_stubs.go new file mode 100644 index 000000000..d20348c11 --- /dev/null +++ b/vendor/github.com/pkg/sftp/attrs_stubs.go @@ -0,0 +1,12 @@ +//go:build plan9 || windows || android +// +build plan9 windows android + +package sftp + +import ( + "os" +) + +func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) { + // todo +} diff --git a/vendor/github.com/pkg/sftp/attrs_unix.go b/vendor/github.com/pkg/sftp/attrs_unix.go new file mode 100644 index 000000000..371ae9b9b --- /dev/null +++ b/vendor/github.com/pkg/sftp/attrs_unix.go @@ -0,0 +1,17 @@ +//go:build darwin || dragonfly || freebsd || (!android && linux) || netbsd || openbsd || solaris || aix || js +// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix js + +package sftp + +import ( + "os" + "syscall" +) + +func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) { + if statt, ok := fi.Sys().(*syscall.Stat_t); ok { + *flags |= sshFileXferAttrUIDGID + fileStat.UID = statt.Uid + fileStat.GID = statt.Gid + } +} diff --git a/vendor/github.com/pkg/sftp/client.go b/vendor/github.com/pkg/sftp/client.go new file mode 100644 index 000000000..0df125e15 --- /dev/null +++ b/vendor/github.com/pkg/sftp/client.go @@ -0,0 +1,2030 @@ +package sftp + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + "path" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/kr/fs" + "golang.org/x/crypto/ssh" +) + +var ( + // ErrInternalInconsistency indicates the packets sent and the data queued to be + // written to the file don't match up. It is an unusual error and usually is + // caused by bad behavior server side or connection issues. 
The error is + // limited in scope to the call where it happened, the client object is still + // OK to use as long as the connection is still open. + ErrInternalInconsistency = errors.New("internal inconsistency") + // InternalInconsistency alias for ErrInternalInconsistency. + // + // Deprecated: please use ErrInternalInconsistency + InternalInconsistency = ErrInternalInconsistency +) + +// A ClientOption is a function which applies configuration to a Client. +type ClientOption func(*Client) error + +// MaxPacketChecked sets the maximum size of the payload, measured in bytes. +// This option only accepts sizes servers should support, ie. <= 32768 bytes. +// +// If you get the error "failed to send packet header: EOF" when copying a +// large file, try lowering this number. +// +// The default packet size is 32768 bytes. +func MaxPacketChecked(size int) ClientOption { + return func(c *Client) error { + if size < 1 { + return errors.New("size must be greater or equal to 1") + } + if size > 32768 { + return errors.New("sizes larger than 32KB might not work with all servers") + } + c.maxPacket = size + return nil + } +} + +// MaxPacketUnchecked sets the maximum size of the payload, measured in bytes. +// It accepts sizes larger than the 32768 bytes all servers should support. +// Only use a setting higher than 32768 if your application always connects to +// the same server or after sufficiently broad testing. +// +// If you get the error "failed to send packet header: EOF" when copying a +// large file, try lowering this number. +// +// The default packet size is 32768 bytes. +func MaxPacketUnchecked(size int) ClientOption { + return func(c *Client) error { + if size < 1 { + return errors.New("size must be greater or equal to 1") + } + c.maxPacket = size + return nil + } +} + +// MaxPacket sets the maximum size of the payload, measured in bytes. +// This option only accepts sizes servers should support, ie. <= 32768 bytes. +// This is a synonym for MaxPacketChecked that provides backward compatibility. +// +// If you get the error "failed to send packet header: EOF" when copying a +// large file, try lowering this number. +// +// The default packet size is 32768 bytes. +func MaxPacket(size int) ClientOption { + return MaxPacketChecked(size) +} + +// MaxConcurrentRequestsPerFile sets the maximum concurrent requests allowed for a single file. +// +// The default maximum concurrent requests is 64. +func MaxConcurrentRequestsPerFile(n int) ClientOption { + return func(c *Client) error { + if n < 1 { + return errors.New("n must be greater or equal to 1") + } + c.maxConcurrentRequests = n + return nil + } +} + +// UseConcurrentWrites allows the Client to perform concurrent Writes. +// +// Using concurrency while doing writes, requires special consideration. +// A write to a later offset in a file after an error, +// could end up with a file length longer than what was successfully written. +// +// When using this option, if you receive an error during `io.Copy` or `io.WriteTo`, +// you may need to `Truncate` the target Writer to avoid “holes” in the data written. +func UseConcurrentWrites(value bool) ClientOption { + return func(c *Client) error { + c.useConcurrentWrites = value + return nil + } +} + +// UseConcurrentReads allows the Client to perform concurrent Reads. +// +// Concurrent reads are generally safe to use and not using them will degrade +// performance, so this option is enabled by default. 
+//
+// When enabled, WriteTo will use Stat/Fstat to get the file size and to
+// determine how many concurrent workers to use.
+// Some "read once" servers will delete the file if they receive a stat call on an
+// open file, and then the download will fail.
+// Disabling concurrent reads makes it possible to download files from such servers.
+// If concurrent reads are disabled, the UseFstat option is ignored.
+func UseConcurrentReads(value bool) ClientOption {
+	return func(c *Client) error {
+		c.disableConcurrentReads = !value
+		return nil
+	}
+}
+
+// UseFstat sets whether to use Fstat or Stat when File.WriteTo is called
+// (usually when copying files).
+// Some servers limit the number of open files and calling Stat after opening
+// the file will return an error from the server. Setting this flag will call
+// Fstat instead of Stat, which is supposed to be called on an open file handle.
+//
+// This has been observed with IBM Sterling SFTP servers that have their
+// "extractability" level set to 1, which means only one file can be opened at
+// any given time.
+//
+// If the server you are working with still has an issue with both Stat and
+// Fstat calls, you can always open a file and read it until the end.
+//
+// Reading the file until its end also helps when Fstat doesn't work, because
+// on some servers reading a full file will automatically delete it: some of
+// these mainframes map the file to a message in a queue, and once the file
+// has been read it is deleted.
+func UseFstat(value bool) ClientOption {
+	return func(c *Client) error {
+		c.useFstat = value
+		return nil
+	}
+}
+
+// Client represents an SFTP session on a *ssh.ClientConn SSH connection.
+// Multiple Clients can be active on a single SSH connection, and a Client
+// may be called concurrently from multiple goroutines.
+//
+// Client implements the github.com/kr/fs.FileSystem interface.
+type Client struct {
+	clientConn
+
+	ext map[string]string // Extensions (name -> data).
+
+	maxPacket             int // max packet size read or written.
+	maxConcurrentRequests int
+	nextid                uint32
+
+	// write concurrency is… error prone.
+	// Default behavior should be to not use it.
+	useConcurrentWrites    bool
+	useFstat               bool
+	disableConcurrentReads bool
+}
+
+// NewClient creates a new SFTP client on conn, using zero or more option
+// functions.
+func NewClient(conn *ssh.Client, opts ...ClientOption) (*Client, error) {
+	s, err := conn.NewSession()
+	if err != nil {
+		return nil, err
+	}
+	if err := s.RequestSubsystem("sftp"); err != nil {
+		return nil, err
+	}
+	pw, err := s.StdinPipe()
+	if err != nil {
+		return nil, err
+	}
+	pr, err := s.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	return NewClientPipe(pr, pw, opts...)
+}
+
+// NewClientPipe creates a new SFTP client given a Reader and a WriteCloser.
+// This can be used for connecting to an SFTP server over TCP/TLS or by using
+// the system's ssh client program (e.g. via exec.Command).
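+//
+// A sketch of direct-TCP use (hypothetical address; most deployments will
+// instead go through NewClient on an *ssh.Client):
+//
+//	conn, err := net.Dial("tcp", "sftp.example.com:9000")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	client, err := sftp.NewClientPipe(conn, conn) // a net.Conn is both an io.Reader and an io.WriteCloser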
+func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...ClientOption) (*Client, error) { + sftp := &Client{ + clientConn: clientConn{ + conn: conn{ + Reader: rd, + WriteCloser: wr, + }, + inflight: make(map[uint32]chan<- result), + closed: make(chan struct{}), + }, + + ext: make(map[string]string), + + maxPacket: 1 << 15, + maxConcurrentRequests: 64, + } + + for _, opt := range opts { + if err := opt(sftp); err != nil { + wr.Close() + return nil, err + } + } + + if err := sftp.sendInit(); err != nil { + wr.Close() + return nil, fmt.Errorf("error sending init packet to server: %w", err) + } + + if err := sftp.recvVersion(); err != nil { + wr.Close() + return nil, fmt.Errorf("error receiving version packet from server: %w", err) + } + + sftp.clientConn.wg.Add(1) + go func() { + defer sftp.clientConn.wg.Done() + + if err := sftp.clientConn.recv(); err != nil { + sftp.clientConn.broadcastErr(err) + } + }() + + return sftp, nil +} + +// Create creates the named file mode 0666 (before umask), truncating it if it +// already exists. If successful, methods on the returned File can be used for +// I/O; the associated file descriptor has mode O_RDWR. If you need more +// control over the flags/mode used to open the file see client.OpenFile. +// +// Note that some SFTP servers (eg. AWS Transfer) do not support opening files +// read/write at the same time. For those services you will need to use +// `client.OpenFile(os.O_WRONLY|os.O_CREATE|os.O_TRUNC)`. +func (c *Client) Create(path string) (*File, error) { + return c.open(path, flags(os.O_RDWR|os.O_CREATE|os.O_TRUNC)) +} + +const sftpProtocolVersion = 3 // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt + +func (c *Client) sendInit() error { + return c.clientConn.conn.sendPacket(&sshFxInitPacket{ + Version: sftpProtocolVersion, // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt + }) +} + +// returns the next value of c.nextid +func (c *Client) nextID() uint32 { + return atomic.AddUint32(&c.nextid, 1) +} + +func (c *Client) recvVersion() error { + typ, data, err := c.recvPacket(0) + if err != nil { + if err == io.EOF { + return fmt.Errorf("server unexpectedly closed connection: %w", io.ErrUnexpectedEOF) + } + + return err + } + + if typ != sshFxpVersion { + return &unexpectedPacketErr{sshFxpVersion, typ} + } + + version, data, err := unmarshalUint32Safe(data) + if err != nil { + return err + } + + if version != sftpProtocolVersion { + return &unexpectedVersionErr{sftpProtocolVersion, version} + } + + for len(data) > 0 { + var ext extensionPair + ext, data, err = unmarshalExtensionPair(data) + if err != nil { + return err + } + c.ext[ext.Name] = ext.Data + } + + return nil +} + +// HasExtension checks whether the server supports a named extension. +// +// The first return value is the extension data reported by the server +// (typically a version number). +func (c *Client) HasExtension(name string) (string, bool) { + data, ok := c.ext[name] + return data, ok +} + +// Walk returns a new Walker rooted at root. +func (c *Client) Walk(root string) *fs.Walker { + return fs.WalkFS(root, c) +} + +// ReadDir reads the directory named by dirname and returns a list of +// directory entries. 
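+//
+// A small usage sketch (hypothetical path):
+//
+//	entries, err := client.ReadDir("/var/log")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, fi := range entries {
+//		fmt.Println(fi.Name(), fi.Size())
+//	}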
+func (c *Client) ReadDir(p string) ([]os.FileInfo, error) { + handle, err := c.opendir(p) + if err != nil { + return nil, err + } + defer c.close(handle) // this has to defer earlier than the lock below + var attrs []os.FileInfo + var done = false + for !done { + id := c.nextID() + typ, data, err1 := c.sendPacket(nil, &sshFxpReaddirPacket{ + ID: id, + Handle: handle, + }) + if err1 != nil { + err = err1 + done = true + break + } + switch typ { + case sshFxpName: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + count, data := unmarshalUint32(data) + for i := uint32(0); i < count; i++ { + var filename string + filename, data = unmarshalString(data) + _, data = unmarshalString(data) // discard longname + var attr *FileStat + attr, data = unmarshalAttrs(data) + if filename == "." || filename == ".." { + continue + } + attrs = append(attrs, fileInfoFromStat(attr, path.Base(filename))) + } + case sshFxpStatus: + // TODO(dfc) scope warning! + err = normaliseError(unmarshalStatus(id, data)) + done = true + default: + return nil, unimplementedPacketErr(typ) + } + } + if err == io.EOF { + err = nil + } + return attrs, err +} + +func (c *Client) opendir(path string) (string, error) { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpOpendirPacket{ + ID: id, + Path: path, + }) + if err != nil { + return "", err + } + switch typ { + case sshFxpHandle: + sid, data := unmarshalUint32(data) + if sid != id { + return "", &unexpectedIDErr{id, sid} + } + handle, _ := unmarshalString(data) + return handle, nil + case sshFxpStatus: + return "", normaliseError(unmarshalStatus(id, data)) + default: + return "", unimplementedPacketErr(typ) + } +} + +// Stat returns a FileInfo structure describing the file specified by path 'p'. +// If 'p' is a symbolic link, the returned FileInfo structure describes the referent file. +func (c *Client) Stat(p string) (os.FileInfo, error) { + fs, err := c.stat(p) + if err != nil { + return nil, err + } + return fileInfoFromStat(fs, path.Base(p)), nil +} + +// Lstat returns a FileInfo structure describing the file specified by path 'p'. +// If 'p' is a symbolic link, the returned FileInfo structure describes the symbolic link. +func (c *Client) Lstat(p string) (os.FileInfo, error) { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpLstatPacket{ + ID: id, + Path: p, + }) + if err != nil { + return nil, err + } + switch typ { + case sshFxpAttrs: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + attr, _ := unmarshalAttrs(data) + return fileInfoFromStat(attr, path.Base(p)), nil + case sshFxpStatus: + return nil, normaliseError(unmarshalStatus(id, data)) + default: + return nil, unimplementedPacketErr(typ) + } +} + +// ReadLink reads the target of a symbolic link. 
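+// For example (hypothetical link):
+//
+//	target, err := client.ReadLink("/srv/current") // e.g. "/srv/releases/v42"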
+func (c *Client) ReadLink(p string) (string, error) { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpReadlinkPacket{ + ID: id, + Path: p, + }) + if err != nil { + return "", err + } + switch typ { + case sshFxpName: + sid, data := unmarshalUint32(data) + if sid != id { + return "", &unexpectedIDErr{id, sid} + } + count, data := unmarshalUint32(data) + if count != 1 { + return "", unexpectedCount(1, count) + } + filename, _ := unmarshalString(data) // ignore dummy attributes + return filename, nil + case sshFxpStatus: + return "", normaliseError(unmarshalStatus(id, data)) + default: + return "", unimplementedPacketErr(typ) + } +} + +// Link creates a hard link at 'newname', pointing at the same inode as 'oldname' +func (c *Client) Link(oldname, newname string) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpHardlinkPacket{ + ID: id, + Oldpath: oldname, + Newpath: newname, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// Symlink creates a symbolic link at 'newname', pointing at target 'oldname' +func (c *Client) Symlink(oldname, newname string) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpSymlinkPacket{ + ID: id, + Linkpath: newname, + Targetpath: oldname, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +func (c *Client) setfstat(handle string, flags uint32, attrs interface{}) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpFsetstatPacket{ + ID: id, + Handle: handle, + Flags: flags, + Attrs: attrs, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// setstat is a convience wrapper to allow for changing of various parts of the file descriptor. +func (c *Client) setstat(path string, flags uint32, attrs interface{}) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpSetstatPacket{ + ID: id, + Path: path, + Flags: flags, + Attrs: attrs, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// Chtimes changes the access and modification times of the named file. +func (c *Client) Chtimes(path string, atime time.Time, mtime time.Time) error { + type times struct { + Atime uint32 + Mtime uint32 + } + attrs := times{uint32(atime.Unix()), uint32(mtime.Unix())} + return c.setstat(path, sshFileXferAttrACmodTime, attrs) +} + +// Chown changes the user and group owners of the named file. +func (c *Client) Chown(path string, uid, gid int) error { + type owner struct { + UID uint32 + GID uint32 + } + attrs := owner{uint32(uid), uint32(gid)} + return c.setstat(path, sshFileXferAttrUIDGID, attrs) +} + +// Chmod changes the permissions of the named file. +// +// Chmod does not apply a umask, because even retrieving the umask is not +// possible in a portable way without causing a race condition. Callers +// should mask off umask bits, if desired. +func (c *Client) Chmod(path string, mode os.FileMode) error { + return c.setstat(path, sshFileXferAttrPermissions, toChmodPerm(mode)) +} + +// Truncate sets the size of the named file. 
Although it may be safely assumed
+// that if the size is less than its current size it will be truncated to fit,
+// the SFTP protocol does not specify how the server should behave when the
+// requested size is greater than the current size.
+func (c *Client) Truncate(path string, size int64) error {
+	return c.setstat(path, sshFileXferAttrSize, uint64(size))
+}
+
+// Open opens the named file for reading. If successful, methods on the
+// returned file can be used for reading; the associated file descriptor
+// has mode O_RDONLY.
+func (c *Client) Open(path string) (*File, error) {
+	return c.open(path, flags(os.O_RDONLY))
+}
+
+// OpenFile is the generalized open call; most users will use Open or
+// Create instead. It opens the named file with specified flag (O_RDONLY
+// etc.). If successful, methods on the returned File can be used for I/O.
+func (c *Client) OpenFile(path string, f int) (*File, error) {
+	return c.open(path, flags(f))
+}
+
+func (c *Client) open(path string, pflags uint32) (*File, error) {
+	id := c.nextID()
+	typ, data, err := c.sendPacket(nil, &sshFxpOpenPacket{
+		ID:     id,
+		Path:   path,
+		Pflags: pflags,
+	})
+	if err != nil {
+		return nil, err
+	}
+	switch typ {
+	case sshFxpHandle:
+		sid, data := unmarshalUint32(data)
+		if sid != id {
+			return nil, &unexpectedIDErr{id, sid}
+		}
+		handle, _ := unmarshalString(data)
+		return &File{c: c, path: path, handle: handle}, nil
+	case sshFxpStatus:
+		return nil, normaliseError(unmarshalStatus(id, data))
+	default:
+		return nil, unimplementedPacketErr(typ)
+	}
+}
+
+// close closes a handle previously returned in the response
+// to SSH_FXP_OPEN or SSH_FXP_OPENDIR. The handle becomes invalid
+// immediately after this request has been sent.
+func (c *Client) close(handle string) error {
+	id := c.nextID()
+	typ, data, err := c.sendPacket(nil, &sshFxpClosePacket{
+		ID:     id,
+		Handle: handle,
+	})
+	if err != nil {
+		return err
+	}
+	switch typ {
+	case sshFxpStatus:
+		return normaliseError(unmarshalStatus(id, data))
+	default:
+		return unimplementedPacketErr(typ)
+	}
+}
+
+func (c *Client) stat(path string) (*FileStat, error) {
+	id := c.nextID()
+	typ, data, err := c.sendPacket(nil, &sshFxpStatPacket{
+		ID:   id,
+		Path: path,
+	})
+	if err != nil {
+		return nil, err
+	}
+	switch typ {
+	case sshFxpAttrs:
+		sid, data := unmarshalUint32(data)
+		if sid != id {
+			return nil, &unexpectedIDErr{id, sid}
+		}
+		attr, _ := unmarshalAttrs(data)
+		return attr, nil
+	case sshFxpStatus:
+		return nil, normaliseError(unmarshalStatus(id, data))
+	default:
+		return nil, unimplementedPacketErr(typ)
+	}
+}
+
+func (c *Client) fstat(handle string) (*FileStat, error) {
+	id := c.nextID()
+	typ, data, err := c.sendPacket(nil, &sshFxpFstatPacket{
+		ID:     id,
+		Handle: handle,
+	})
+	if err != nil {
+		return nil, err
+	}
+	switch typ {
+	case sshFxpAttrs:
+		sid, data := unmarshalUint32(data)
+		if sid != id {
+			return nil, &unexpectedIDErr{id, sid}
+		}
+		attr, _ := unmarshalAttrs(data)
+		return attr, nil
+	case sshFxpStatus:
+		return nil, normaliseError(unmarshalStatus(id, data))
+	default:
+		return nil, unimplementedPacketErr(typ)
+	}
+}
+
+// StatVFS retrieves VFS statistics from a remote host.
+//
+// It implements the statvfs@openssh.com SSH_FXP_EXTENDED feature
+// from http://www.opensource.apple.com/source/OpenSSH/OpenSSH-175/openssh/PROTOCOL?txt.
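+//
+// A usage sketch (hypothetical mount point); servers that do not implement
+// the extension will return an error, so it can be probed for first:
+//
+//	if _, ok := client.HasExtension("statvfs@openssh.com"); ok {
+//		vfs, err := client.StatVFS("/data")
+//		// inspect the returned *StatVFS
+//	}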
+func (c *Client) StatVFS(path string) (*StatVFS, error) { + // send the StatVFS packet to the server + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpStatvfsPacket{ + ID: id, + Path: path, + }) + if err != nil { + return nil, err + } + + switch typ { + // server responded with valid data + case sshFxpExtendedReply: + var response StatVFS + err = binary.Read(bytes.NewReader(data), binary.BigEndian, &response) + if err != nil { + return nil, errors.New("can not parse reply") + } + + return &response, nil + + // the resquest failed + case sshFxpStatus: + return nil, normaliseError(unmarshalStatus(id, data)) + + default: + return nil, unimplementedPacketErr(typ) + } +} + +// Join joins any number of path elements into a single path, adding a +// separating slash if necessary. The result is Cleaned; in particular, all +// empty strings are ignored. +func (c *Client) Join(elem ...string) string { return path.Join(elem...) } + +// Remove removes the specified file or directory. An error will be returned if no +// file or directory with the specified path exists, or if the specified directory +// is not empty. +func (c *Client) Remove(path string) error { + err := c.removeFile(path) + // some servers, *cough* osx *cough*, return EPERM, not ENODIR. + // serv-u returns ssh_FX_FILE_IS_A_DIRECTORY + // EPERM is converted to os.ErrPermission so it is not a StatusError + if err, ok := err.(*StatusError); ok { + switch err.Code { + case sshFxFailure, sshFxFileIsADirectory: + return c.RemoveDirectory(path) + } + } + if os.IsPermission(err) { + return c.RemoveDirectory(path) + } + return err +} + +func (c *Client) removeFile(path string) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpRemovePacket{ + ID: id, + Filename: path, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// RemoveDirectory removes a directory path. +func (c *Client) RemoveDirectory(path string) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpRmdirPacket{ + ID: id, + Path: path, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// Rename renames a file. +func (c *Client) Rename(oldname, newname string) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpRenamePacket{ + ID: id, + Oldpath: oldname, + Newpath: newname, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// PosixRename renames a file using the posix-rename@openssh.com extension +// which will replace newname if it already exists. +func (c *Client) PosixRename(oldname, newname string) error { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpPosixRenamePacket{ + ID: id, + Oldpath: oldname, + Newpath: newname, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// RealPath can be used to have the server canonicalize any given path name to an absolute path. +// +// This is useful for converting path names containing ".." components, +// or relative pathnames without a leading slash into absolute paths. 
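+// For example (hypothetical server-side home directory):
+//
+//	abs, err := client.RealPath("uploads/../archive") // e.g. "/home/user/archive"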
+func (c *Client) RealPath(path string) (string, error) {
+	id := c.nextID()
+	typ, data, err := c.sendPacket(nil, &sshFxpRealpathPacket{
+		ID:   id,
+		Path: path,
+	})
+	if err != nil {
+		return "", err
+	}
+	switch typ {
+	case sshFxpName:
+		sid, data := unmarshalUint32(data)
+		if sid != id {
+			return "", &unexpectedIDErr{id, sid}
+		}
+		count, data := unmarshalUint32(data)
+		if count != 1 {
+			return "", unexpectedCount(1, count)
+		}
+		filename, _ := unmarshalString(data) // ignore attributes
+		return filename, nil
+	case sshFxpStatus:
+		return "", normaliseError(unmarshalStatus(id, data))
+	default:
+		return "", unimplementedPacketErr(typ)
+	}
+}
+
+// Getwd returns the current working directory of the server. Operations
+// involving relative paths will be based at this location.
+func (c *Client) Getwd() (string, error) {
+	return c.RealPath(".")
+}
+
+// Mkdir creates the specified directory. An error will be returned if a file or
+// directory with the specified path already exists, or if the directory's
+// parent folder does not exist (the method cannot create complete paths).
+func (c *Client) Mkdir(path string) error {
+	id := c.nextID()
+	typ, data, err := c.sendPacket(nil, &sshFxpMkdirPacket{
+		ID:   id,
+		Path: path,
+	})
+	if err != nil {
+		return err
+	}
+	switch typ {
+	case sshFxpStatus:
+		return normaliseError(unmarshalStatus(id, data))
+	default:
+		return unimplementedPacketErr(typ)
+	}
+}
+
+// MkdirAll creates a directory named path, along with any necessary parents,
+// and returns nil, or else returns an error.
+// If path is already a directory, MkdirAll does nothing and returns nil.
+// If path contains a regular file, an error is returned.
+func (c *Client) MkdirAll(path string) error {
+	// Most of this code mimics https://golang.org/src/os/path.go?s=514:561#L13
+	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
+	dir, err := c.Stat(path)
+	if err == nil {
+		if dir.IsDir() {
+			return nil
+		}
+		return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
+	}
+
+	// Slow path: make sure parent exists and then call Mkdir for path.
+	i := len(path)
+	for i > 0 && path[i-1] == '/' { // Skip trailing path separator.
+		i--
+	}
+
+	j := i
+	for j > 0 && path[j-1] != '/' { // Scan backward over element.
+		j--
+	}
+
+	if j > 1 {
+		// Create parent
+		err = c.MkdirAll(path[0 : j-1])
+		if err != nil {
+			return err
+		}
+	}
+
+	// Parent now exists; invoke Mkdir and use its result.
+	err = c.Mkdir(path)
+	if err != nil {
+		// Handle arguments like "foo/." by
+		// double-checking that directory doesn't exist.
+		dir, err1 := c.Lstat(path)
+		if err1 == nil && dir.IsDir() {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// RemoveAll removes the named file or directory, recursively deleting any
+// subdirectories and files it contains.
+// An error will be returned if no file or directory with the specified path exists +func (c *Client) RemoveAll(path string) error { + + // Get the file/directory information + fi, err := c.Stat(path) + if err != nil { + return err + } + + if fi.IsDir() { + // Delete files recursively in the directory + files, err := c.ReadDir(path) + if err != nil { + return err + } + + for _, file := range files { + if file.IsDir() { + // Recursively delete subdirectories + err = c.RemoveAll(path + "/" + file.Name()) + if err != nil { + return err + } + } else { + // Delete individual files + err = c.Remove(path + "/" + file.Name()) + if err != nil { + return err + } + } + } + + } + + return c.Remove(path) + +} + +// File represents a remote file. +type File struct { + c *Client + path string + handle string + + mu sync.Mutex + offset int64 // current offset within remote file +} + +// Close closes the File, rendering it unusable for I/O. It returns an +// error, if any. +func (f *File) Close() error { + return f.c.close(f.handle) +} + +// Name returns the name of the file as presented to Open or Create. +func (f *File) Name() string { + return f.path +} + +// Read reads up to len(b) bytes from the File. It returns the number of bytes +// read and an error, if any. Read follows io.Reader semantics, so when Read +// encounters an error or EOF condition after successfully reading n > 0 bytes, +// it returns the number of bytes read. +// +// To maximise throughput for transferring the entire file (especially +// over high latency links) it is recommended to use WriteTo rather +// than calling Read multiple times. io.Copy will do this +// automatically. +func (f *File) Read(b []byte) (int, error) { + f.mu.Lock() + defer f.mu.Unlock() + + n, err := f.ReadAt(b, f.offset) + f.offset += int64(n) + return n, err +} + +// readChunkAt attempts to read the whole entire length of the buffer from the file starting at the offset. +// It will continue progressively reading into the buffer until it fills the whole buffer, or an error occurs. +func (f *File) readChunkAt(ch chan result, b []byte, off int64) (n int, err error) { + for err == nil && n < len(b) { + id := f.c.nextID() + typ, data, err := f.c.sendPacket(ch, &sshFxpReadPacket{ + ID: id, + Handle: f.handle, + Offset: uint64(off) + uint64(n), + Len: uint32(len(b) - n), + }) + if err != nil { + return n, err + } + + switch typ { + case sshFxpStatus: + return n, normaliseError(unmarshalStatus(id, data)) + + case sshFxpData: + sid, data := unmarshalUint32(data) + if id != sid { + return n, &unexpectedIDErr{id, sid} + } + + l, data := unmarshalUint32(data) + n += copy(b[n:], data[:l]) + + default: + return n, unimplementedPacketErr(typ) + } + } + + return +} + +func (f *File) readAtSequential(b []byte, off int64) (read int, err error) { + for read < len(b) { + rb := b[read:] + if len(rb) > f.c.maxPacket { + rb = rb[:f.c.maxPacket] + } + n, err := f.readChunkAt(nil, rb, off+int64(read)) + if n < 0 { + panic("sftp.File: returned negative count from readChunkAt") + } + if n > 0 { + read += n + } + if err != nil { + return read, err + } + } + return read, nil +} + +// ReadAt reads up to len(b) byte from the File at a given offset `off`. It returns +// the number of bytes read and an error, if any. ReadAt follows io.ReaderAt semantics, +// so the file offset is not altered during the read. +func (f *File) ReadAt(b []byte, off int64) (int, error) { + if len(b) <= f.c.maxPacket { + // This should be able to be serviced with 1/2 requests. + // So, just do it directly. 
+ return f.readChunkAt(nil, b, off) + } + + if f.c.disableConcurrentReads { + return f.readAtSequential(b, off) + } + + // Split the read into multiple maxPacket-sized concurrent reads bounded by maxConcurrentRequests. + // This allows writes with a suitably large buffer to transfer data at a much faster rate + // by overlapping round trip times. + + cancel := make(chan struct{}) + + concurrency := len(b)/f.c.maxPacket + 1 + if concurrency > f.c.maxConcurrentRequests || concurrency < 1 { + concurrency = f.c.maxConcurrentRequests + } + + resPool := newResChanPool(concurrency) + + type work struct { + id uint32 + res chan result + + b []byte + off int64 + } + workCh := make(chan work) + + // Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets. + go func() { + defer close(workCh) + + b := b + offset := off + chunkSize := f.c.maxPacket + + for len(b) > 0 { + rb := b + if len(rb) > chunkSize { + rb = rb[:chunkSize] + } + + id := f.c.nextID() + res := resPool.Get() + + f.c.dispatchRequest(res, &sshFxpReadPacket{ + ID: id, + Handle: f.handle, + Offset: uint64(offset), + Len: uint32(chunkSize), + }) + + select { + case workCh <- work{id, res, rb, offset}: + case <-cancel: + return + } + + offset += int64(len(rb)) + b = b[len(rb):] + } + }() + + type rErr struct { + off int64 + err error + } + errCh := make(chan rErr) + + var wg sync.WaitGroup + wg.Add(concurrency) + for i := 0; i < concurrency; i++ { + // Map_i: each worker gets work, and then performs the Read into its buffer from its respective offset. + go func() { + defer wg.Done() + + for packet := range workCh { + var n int + + s := <-packet.res + resPool.Put(packet.res) + + err := s.err + if err == nil { + switch s.typ { + case sshFxpStatus: + err = normaliseError(unmarshalStatus(packet.id, s.data)) + + case sshFxpData: + sid, data := unmarshalUint32(s.data) + if packet.id != sid { + err = &unexpectedIDErr{packet.id, sid} + + } else { + l, data := unmarshalUint32(data) + n = copy(packet.b, data[:l]) + + // For normal disk files, it is guaranteed that this will read + // the specified number of bytes, or up to end of file. + // This implies, if we have a short read, that means EOF. + if n < len(packet.b) { + err = io.EOF + } + } + + default: + err = unimplementedPacketErr(s.typ) + } + } + + if err != nil { + // return the offset as the start + how much we read before the error. + errCh <- rErr{packet.off + int64(n), err} + return + } + } + }() + } + + // Wait for long tail, before closing results. + go func() { + wg.Wait() + close(errCh) + }() + + // Reduce: collect all the results into a relevant return: the earliest offset to return an error. + firstErr := rErr{math.MaxInt64, nil} + for rErr := range errCh { + if rErr.off <= firstErr.off { + firstErr = rErr + } + + select { + case <-cancel: + default: + // stop any more work from being distributed. (Just in case.) + close(cancel) + } + } + + if firstErr.err != nil { + // firstErr.err != nil if and only if firstErr.off > our starting offset. + return int(firstErr.off - off), firstErr.err + } + + // As per spec for io.ReaderAt, we return nil error if and only if we read everything. + return len(b), nil +} + +// writeToSequential implements WriteTo, but works sequentially with no parallelism. 
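+// It reuses a single maxPacket-sized buffer and one result channel across
+// the whole transfer, so the per-chunk allocation cost is constant.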
+
+// writeToSequential implements WriteTo, but works sequentially with no parallelism.
+func (f *File) writeToSequential(w io.Writer) (written int64, err error) {
+	b := make([]byte, f.c.maxPacket)
+	ch := make(chan result, 1) // reusable channel
+
+	for {
+		n, err := f.readChunkAt(ch, b, f.offset)
+		if n < 0 {
+			panic("sftp.File: returned negative count from readChunkAt")
+		}
+
+		if n > 0 {
+			f.offset += int64(n)
+
+			m, err := w.Write(b[:n])
+			written += int64(m)
+
+			if err != nil {
+				return written, err
+			}
+		}
+
+		if err != nil {
+			if err == io.EOF {
+				return written, nil // return nil explicitly.
+			}
+
+			return written, err
+		}
+	}
+}
+
+// WriteTo writes the file to the given Writer.
+// The return value is the number of bytes written.
+// Any error encountered during the write is also returned.
+//
+// This method is preferred over calling Read multiple times
+// to maximise throughput for transferring the entire file,
+// especially over high latency links.
+func (f *File) WriteTo(w io.Writer) (written int64, err error) {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	if f.c.disableConcurrentReads {
+		return f.writeToSequential(w)
+	}
+
+	// For concurrency, we want to guess how many concurrent workers we should use.
+	var fileStat *FileStat
+	if f.c.useFstat {
+		fileStat, err = f.c.fstat(f.handle)
+	} else {
+		fileStat, err = f.c.stat(f.path)
+	}
+	if err != nil {
+		return 0, err
+	}
+
+	fileSize := fileStat.Size
+	if fileSize <= uint64(f.c.maxPacket) || !isRegular(fileStat.Mode) {
+		// only regular files are guaranteed to return (full read) xor (partial read, next error)
+		return f.writeToSequential(w)
+	}
+
+	concurrency64 := fileSize/uint64(f.c.maxPacket) + 1 // a bad guess, but better than no guess
+	if concurrency64 > uint64(f.c.maxConcurrentRequests) || concurrency64 < 1 {
+		concurrency64 = uint64(f.c.maxConcurrentRequests)
+	}
+	// concurrency64 is now capped at f.c.maxConcurrentRequests (an int), so this conversion cannot overflow.
+	concurrency := int(concurrency64)
+
+	chunkSize := f.c.maxPacket
+	pool := newBufPool(concurrency, chunkSize)
+	resPool := newResChanPool(concurrency)
+
+	cancel := make(chan struct{})
+	var wg sync.WaitGroup
+	defer func() {
+		// Once the writing Reduce phase has ended, all the feed work needs to unconditionally stop.
+		close(cancel)
+
+		// We want to wait until all outstanding goroutines with an `f` or `f.c` reference have completed.
+		// Just to be sure we don’t orphan any goroutines or leave any hanging references.
+		wg.Wait()
+	}()
+
+	type writeWork struct {
+		b   []byte
+		off int64
+		err error
+
+		next chan writeWork
+	}
+	writeCh := make(chan writeWork)
+
+	type readWork struct {
+		id  uint32
+		res chan result
+		off int64
+
+		cur, next chan writeWork
+	}
+	readCh := make(chan readWork)
+
+	// Slice: hand out chunks of work on demand, with a `cur` and `next` channel built-in for sequencing.
+	go func() {
+		defer close(readCh)
+
+		off := f.offset
+
+		cur := writeCh
+		for {
+			id := f.c.nextID()
+			res := resPool.Get()
+
+			next := make(chan writeWork)
+			readWork := readWork{
+				id:  id,
+				res: res,
+				off: off,
+
+				cur:  cur,
+				next: next,
+			}
+
+			f.c.dispatchRequest(res, &sshFxpReadPacket{
+				ID:     id,
+				Handle: f.handle,
+				Offset: uint64(off),
+				Len:    uint32(chunkSize),
+			})
+
+			select {
+			case readCh <- readWork:
+			case <-cancel:
+				return
+			}
+
+			off += int64(chunkSize)
+			cur = next
+		}
+	}()
+
+	wg.Add(concurrency)
+	for i := 0; i < concurrency; i++ {
+		// Map_i: each worker gets readWork, and does the Read into a buffer at the given offset.
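+		// (Editorial note: the workers below never write to w themselves;
+		// each one forwards its buffer down the cur/next channel chain, so
+		// the Reduce loop at the bottom can emit chunks strictly in file
+		// order even though the reads complete out of order.)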
+		go func() {
+			defer wg.Done()
+
+			for readWork := range readCh {
+				var b []byte
+				var n int
+
+				s := <-readWork.res
+				resPool.Put(readWork.res)
+
+				err := s.err
+				if err == nil {
+					switch s.typ {
+					case sshFxpStatus:
+						err = normaliseError(unmarshalStatus(readWork.id, s.data))
+
+					case sshFxpData:
+						sid, data := unmarshalUint32(s.data)
+						if readWork.id != sid {
+							err = &unexpectedIDErr{readWork.id, sid}
+
+						} else {
+							l, data := unmarshalUint32(data)
+							b = pool.Get()[:l]
+							n = copy(b, data[:l])
+							b = b[:n]
+						}
+
+					default:
+						err = unimplementedPacketErr(s.typ)
+					}
+				}
+
+				writeWork := writeWork{
+					b:   b,
+					off: readWork.off,
+					err: err,
+
+					next: readWork.next,
+				}
+
+				select {
+				case readWork.cur <- writeWork:
+				case <-cancel:
+					return
+				}
+
+				if err != nil {
+					return
+				}
+			}
+		}()
+	}
+
+	// Reduce: serialize the results from the reads into sequential writes.
+	cur := writeCh
+	for {
+		packet, ok := <-cur
+		if !ok {
+			return written, errors.New("sftp.File.WriteTo: unexpectedly closed channel")
+		}
+
+		// Because writes are serialized, this will always be the last successfully read byte.
+		f.offset = packet.off + int64(len(packet.b))
+
+		if len(packet.b) > 0 {
+			n, err := w.Write(packet.b)
+			written += int64(n)
+			if err != nil {
+				return written, err
+			}
+		}
+
+		if packet.err != nil {
+			if packet.err == io.EOF {
+				return written, nil
+			}
+
+			return written, packet.err
+		}
+
+		pool.Put(packet.b)
+		cur = packet.next
+	}
+}
+
+// Stat returns the FileInfo structure describing the file.
+// It returns an error, if any.
+func (f *File) Stat() (os.FileInfo, error) {
+	fs, err := f.c.fstat(f.handle)
+	if err != nil {
+		return nil, err
+	}
+	return fileInfoFromStat(fs, path.Base(f.path)), nil
+}
+
+// Write writes len(b) bytes to the File. It returns the number of bytes
+// written and an error, if any. Write returns a non-nil error when n !=
+// len(b).
+//
+// To maximise throughput for transferring the entire file (especially
+// over high latency links) it is recommended to use ReadFrom rather
+// than calling Write multiple times. io.Copy will do this
+// automatically.
+func (f *File) Write(b []byte) (int, error) {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	n, err := f.WriteAt(b, f.offset)
+	f.offset += int64(n)
+	return n, err
+}
+
+func (f *File) writeChunkAt(ch chan result, b []byte, off int64) (int, error) {
+	typ, data, err := f.c.sendPacket(ch, &sshFxpWritePacket{
+		ID:     f.c.nextID(),
+		Handle: f.handle,
+		Offset: uint64(off),
+		Length: uint32(len(b)),
+		Data:   b,
+	})
+	if err != nil {
+		return 0, err
+	}
+
+	switch typ {
+	case sshFxpStatus:
+		id, _ := unmarshalUint32(data)
+		err := normaliseError(unmarshalStatus(id, data))
+		if err != nil {
+			return 0, err
+		}
+
+	default:
+		return 0, unimplementedPacketErr(typ)
+	}
+
+	return len(b), nil
+}
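+
+// The sketch below is an illustrative editorial addition, not part of the
+// upstream library: it patches a region of a remote file in place via the
+// io.WriterAt semantics documented on WriteAt below.
+func examplePatch(f *File, header []byte) error {
+	// Overwrite the first len(header) bytes; the file offset is unchanged.
+	_, err := f.WriteAt(header, 0)
+	return err
+}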
+
+// writeAtConcurrent implements WriterAt, but works concurrently rather than sequentially.
+func (f *File) writeAtConcurrent(b []byte, off int64) (int, error) {
+	// Split the write into multiple maxPacket sized concurrent writes
+	// bounded by maxConcurrentRequests. This allows writes with a suitably
+	// large buffer to transfer data at a much faster rate due to
+	// overlapping round trip times.
+
+	cancel := make(chan struct{})
+
+	type work struct {
+		id  uint32
+		res chan result
+
+		off int64
+	}
+	workCh := make(chan work)
+
+	concurrency := len(b)/f.c.maxPacket + 1
+	if concurrency > f.c.maxConcurrentRequests || concurrency < 1 {
+		concurrency = f.c.maxConcurrentRequests
+	}
+
+	pool := newResChanPool(concurrency)
+
+	// Slice: cut up the Write into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets.
+	go func() {
+		defer close(workCh)
+
+		var read int
+		chunkSize := f.c.maxPacket
+
+		for read < len(b) {
+			wb := b[read:]
+			if len(wb) > chunkSize {
+				wb = wb[:chunkSize]
+			}
+
+			id := f.c.nextID()
+			res := pool.Get()
+			off := off + int64(read)
+
+			f.c.dispatchRequest(res, &sshFxpWritePacket{
+				ID:     id,
+				Handle: f.handle,
+				Offset: uint64(off),
+				Length: uint32(len(wb)),
+				Data:   wb,
+			})
+
+			select {
+			case workCh <- work{id, res, off}:
+			case <-cancel:
+				return
+			}
+
+			read += len(wb)
+		}
+	}()
+
+	type wErr struct {
+		off int64
+		err error
+	}
+	errCh := make(chan wErr)
+
+	var wg sync.WaitGroup
+	wg.Add(concurrency)
+	for i := 0; i < concurrency; i++ {
+		// Map_i: each worker gets work, and does the Write from each buffer to its respective offset.
+		go func() {
+			defer wg.Done()
+
+			for work := range workCh {
+				s := <-work.res
+				pool.Put(work.res)
+
+				err := s.err
+				if err == nil {
+					switch s.typ {
+					case sshFxpStatus:
+						err = normaliseError(unmarshalStatus(work.id, s.data))
+					default:
+						err = unimplementedPacketErr(s.typ)
+					}
+				}
+
+				if err != nil {
+					errCh <- wErr{work.off, err}
+				}
+			}
+		}()
+	}
+
+	// Wait for long tail, before closing results.
+	go func() {
+		wg.Wait()
+		close(errCh)
+	}()
+
+	// Reduce: collect all the results into a relevant return: the earliest offset to return an error.
+	firstErr := wErr{math.MaxInt64, nil}
+	for wErr := range errCh {
+		if wErr.off <= firstErr.off {
+			firstErr = wErr
+		}
+
+		select {
+		case <-cancel:
+		default:
+			// stop any more work from being distributed. (Just in case.)
+			close(cancel)
+		}
+	}
+
+	if firstErr.err != nil {
+		// firstErr.err != nil if and only if firstErr.off >= our starting offset.
+		return int(firstErr.off - off), firstErr.err
+	}
+
+	return len(b), nil
+}
+
+// WriteAt writes up to len(b) bytes to the File at a given offset `off`. It returns
+// the number of bytes written and an error, if any. WriteAt follows io.WriterAt semantics,
+// so the file offset is not altered during the write.
+func (f *File) WriteAt(b []byte, off int64) (written int, err error) {
+	if len(b) <= f.c.maxPacket {
+		// We can do this in one write.
+		return f.writeChunkAt(nil, b, off)
+	}
+
+	if f.c.useConcurrentWrites {
+		return f.writeAtConcurrent(b, off)
+	}
+
+	ch := make(chan result, 1) // reusable channel
+
+	chunkSize := f.c.maxPacket
+
+	for written < len(b) {
+		wb := b[written:]
+		if len(wb) > chunkSize {
+			wb = wb[:chunkSize]
+		}
+
+		n, err := f.writeChunkAt(ch, wb, off+int64(written))
+		if n > 0 {
+			written += n
+		}
+
+		if err != nil {
+			return written, err
+		}
+	}
+
+	return len(b), nil
+}
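+
+// The sketch below is an illustrative editorial addition, not part of the
+// upstream library: it streams an upload through ReadFromWithConcurrency
+// with an explicit budget of 4 in-flight write packets.
+func exampleUpload(f *File, r io.Reader) (int64, error) {
+	// A value below 1 would fall back to the client-wide maximum.
+	return f.ReadFromWithConcurrency(r, 4)
+}
+
+// ReadFromWithConcurrency implements ReaderFrom,
+// but uses the given concurrency to issue multiple requests at the same time.
+//
+// Giving a concurrency of less than one will default to the Client’s max concurrency.
+//
+// Otherwise, the given concurrency will be capped by the Client's max concurrency.
+func (f *File) ReadFromWithConcurrency(r io.Reader, concurrency int) (read int64, err error) {
+	// Split the write into multiple maxPacket sized concurrent writes.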
+ // This allows writes with a suitably large reader + // to transfer data at a much faster rate due to overlapping round trip times. + + cancel := make(chan struct{}) + + type work struct { + id uint32 + res chan result + + off int64 + } + workCh := make(chan work) + + type rwErr struct { + off int64 + err error + } + errCh := make(chan rwErr) + + if concurrency > f.c.maxConcurrentRequests || concurrency < 1 { + concurrency = f.c.maxConcurrentRequests + } + + pool := newResChanPool(concurrency) + + // Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets. + go func() { + defer close(workCh) + + b := make([]byte, f.c.maxPacket) + off := f.offset + + for { + n, err := r.Read(b) + + if n > 0 { + read += int64(n) + + id := f.c.nextID() + res := pool.Get() + + f.c.dispatchRequest(res, &sshFxpWritePacket{ + ID: id, + Handle: f.handle, + Offset: uint64(off), + Length: uint32(n), + Data: b[:n], + }) + + select { + case workCh <- work{id, res, off}: + case <-cancel: + return + } + + off += int64(n) + } + + if err != nil { + if err != io.EOF { + errCh <- rwErr{off, err} + } + return + } + } + }() + + var wg sync.WaitGroup + wg.Add(concurrency) + for i := 0; i < concurrency; i++ { + // Map_i: each worker gets work, and does the Write from each buffer to its respective offset. + go func() { + defer wg.Done() + + for work := range workCh { + s := <-work.res + pool.Put(work.res) + + err := s.err + if err == nil { + switch s.typ { + case sshFxpStatus: + err = normaliseError(unmarshalStatus(work.id, s.data)) + default: + err = unimplementedPacketErr(s.typ) + } + } + + if err != nil { + errCh <- rwErr{work.off, err} + } + } + }() + } + + // Wait for long tail, before closing results. + go func() { + wg.Wait() + close(errCh) + }() + + // Reduce: Collect all the results into a relevant return: the earliest offset to return an error. + firstErr := rwErr{math.MaxInt64, nil} + for rwErr := range errCh { + if rwErr.off <= firstErr.off { + firstErr = rwErr + } + + select { + case <-cancel: + default: + // stop any more work from being distributed. + close(cancel) + } + } + + if firstErr.err != nil { + // firstErr.err != nil if and only if firstErr.off is a valid offset. + // + // firstErr.off will then be the lesser of: + // * the offset of the first error from writing, + // * the last successfully read offset. + // + // This could be less than the last successfully written offset, + // which is the whole reason for the UseConcurrentWrites() ClientOption. + // + // Callers are responsible for truncating any SFTP files to a safe length. + f.offset = firstErr.off + + // ReadFrom is defined to return the read bytes, regardless of any writer errors. + return read, firstErr.err + } + + f.offset += read + return read, nil +} + +// ReadFrom reads data from r until EOF and writes it to the file. The return +// value is the number of bytes read. Any error except io.EOF encountered +// during the read is also returned. +// +// This method is preferred over calling Write multiple times +// to maximise throughput for transferring the entire file, +// especially over high-latency links. 
+func (f *File) ReadFrom(r io.Reader) (int64, error) {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	if f.c.useConcurrentWrites {
+		var remain int64
+		switch r := r.(type) {
+		case interface{ Len() int }:
+			remain = int64(r.Len())
+
+		case interface{ Size() int64 }:
+			remain = r.Size()
+
+		case *io.LimitedReader:
+			remain = r.N
+
+		case interface{ Stat() (os.FileInfo, error) }:
+			info, err := r.Stat()
+			if err == nil {
+				remain = info.Size()
+			}
+		}
+
+		if remain < 0 {
+			// We can strongly assert that we want default max concurrency here.
+			return f.ReadFromWithConcurrency(r, f.c.maxConcurrentRequests)
+		}
+
+		if remain > int64(f.c.maxPacket) {
+			// Otherwise, only use concurrency if it would be at least two packets.
+
+			// This is the best reasonable guess we can make.
+			concurrency64 := remain/int64(f.c.maxPacket) + 1
+
+			// We need to cap this value to an `int` size value to avoid overflow on 32-bit machines.
+			// So, we may as well pre-cap it to `f.c.maxConcurrentRequests`.
+			if concurrency64 > int64(f.c.maxConcurrentRequests) {
+				concurrency64 = int64(f.c.maxConcurrentRequests)
+			}
+
+			return f.ReadFromWithConcurrency(r, int(concurrency64))
+		}
+	}
+
+	ch := make(chan result, 1) // reusable channel
+
+	b := make([]byte, f.c.maxPacket)
+
+	var read int64
+	for {
+		n, err := r.Read(b)
+		if n < 0 {
+			panic("sftp.File: reader returned negative count from Read")
+		}
+
+		if n > 0 {
+			read += int64(n)
+
+			m, err2 := f.writeChunkAt(ch, b[:n], f.offset)
+			f.offset += int64(m)
+
+			if err == nil {
+				err = err2
+			}
+		}
+
+		if err != nil {
+			if err == io.EOF {
+				return read, nil // return nil explicitly.
+			}
+
+			return read, err
+		}
+	}
+}
+
+// Seek implements io.Seeker by setting the client offset for the next Read or
+// Write. It returns the new offset. Seeking before the start of the file
+// returns an error; seeking beyond the end of the file is undefined.
+// Seeking relative to the end calls Stat.
+func (f *File) Seek(offset int64, whence int) (int64, error) {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	switch whence {
+	case io.SeekStart:
+	case io.SeekCurrent:
+		offset += f.offset
+	case io.SeekEnd:
+		fi, err := f.Stat()
+		if err != nil {
+			return f.offset, err
+		}
+		offset += fi.Size()
+	default:
+		return f.offset, unimplementedSeekWhence(whence)
+	}
+
+	if offset < 0 {
+		return f.offset, os.ErrInvalid
+	}
+
+	f.offset = offset
+	return f.offset, nil
+}
+
+// Chown changes the uid/gid of the current file.
+func (f *File) Chown(uid, gid int) error {
+	return f.c.Chown(f.path, uid, gid)
+}
+
+// Chmod changes the permissions of the current file.
+//
+// See Client.Chmod for details.
+func (f *File) Chmod(mode os.FileMode) error {
+	return f.c.setfstat(f.handle, sshFileXferAttrPermissions, toChmodPerm(mode))
+}
+
+// Sync requests a flush of the contents of a File to stable storage.
+//
+// Sync requires the server to support the fsync@openssh.com extension.
+func (f *File) Sync() error {
+	id := f.c.nextID()
+	typ, data, err := f.c.sendPacket(nil, &sshFxpFsyncPacket{
+		ID:     id,
+		Handle: f.handle,
+	})
+
+	switch {
+	case err != nil:
+		return err
+	case typ == sshFxpStatus:
+		return normaliseError(unmarshalStatus(id, data))
+	default:
+		return &unexpectedPacketErr{want: sshFxpStatus, got: typ}
+	}
+}
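+
+// The sketch below is an illustrative editorial addition, not part of the
+// upstream library: it reads the final n bytes of a remote file by seeking
+// relative to the end, which (per the Seek docs above) performs a Stat.
+func exampleTail(f *File, n int64) ([]byte, error) {
+	if _, err := f.Seek(-n, io.SeekEnd); err != nil {
+		return nil, err
+	}
+
+	b := make([]byte, n)
+	m, err := io.ReadFull(f, b)
+	if err == io.EOF || err == io.ErrUnexpectedEOF {
+		err = nil // tolerate a short tail for this sketch
+	}
+	return b[:m], err
+}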
+// Truncate sets the size of the current file. Although it may be safely assumed
+// that if the size is less than its current size it will be truncated to fit,
+// the SFTP protocol does not specify how the server should behave when the
+// requested size is greater than the current size.
+// We send an SSH_FXP_FSETSTAT here, since we have a file handle.
+func (f *File) Truncate(size int64) error {
+	return f.c.setfstat(f.handle, sshFileXferAttrSize, uint64(size))
+}
+
+// normaliseError normalises an error into a more standard form that can be
+// checked against stdlib errors like io.EOF or os.ErrNotExist.
+func normaliseError(err error) error {
+	switch err := err.(type) {
+	case *StatusError:
+		switch err.Code {
+		case sshFxEOF:
+			return io.EOF
+		case sshFxNoSuchFile:
+			return os.ErrNotExist
+		case sshFxPermissionDenied:
+			return os.ErrPermission
+		case sshFxOk:
+			return nil
+		default:
+			return err
+		}
+	default:
+		return err
+	}
+}
+
+// flags converts the flags passed to OpenFile into ssh flags.
+// Unsupported flags are ignored.
+func flags(f int) uint32 {
+	var out uint32
+	switch f & os.O_WRONLY {
+	case os.O_WRONLY:
+		out |= sshFxfWrite
+	case os.O_RDONLY:
+		out |= sshFxfRead
+	}
+	if f&os.O_RDWR == os.O_RDWR {
+		out |= sshFxfRead | sshFxfWrite
+	}
+	if f&os.O_APPEND == os.O_APPEND {
+		out |= sshFxfAppend
+	}
+	if f&os.O_CREATE == os.O_CREATE {
+		out |= sshFxfCreat
+	}
+	if f&os.O_TRUNC == os.O_TRUNC {
+		out |= sshFxfTrunc
+	}
+	if f&os.O_EXCL == os.O_EXCL {
+		out |= sshFxfExcl
+	}
+	return out
+}
+
+// toChmodPerm converts Go permission bits to POSIX permission bits.
+//
+// This differs from fromFileMode in that we preserve the POSIX versions of
+// setuid, setgid and sticky in m, because we've historically supported those
+// bits, and we mask off any non-permission bits.
+func toChmodPerm(m os.FileMode) (perm uint32) {
+	const mask = os.ModePerm | s_ISUID | s_ISGID | s_ISVTX
+	perm = uint32(m & mask)
+
+	if m&os.ModeSetuid != 0 {
+		perm |= s_ISUID
+	}
+	if m&os.ModeSetgid != 0 {
+		perm |= s_ISGID
+	}
+	if m&os.ModeSticky != 0 {
+		perm |= s_ISVTX
+	}
+
+	return perm
+}
diff --git a/vendor/github.com/pkg/sftp/conn.go b/vendor/github.com/pkg/sftp/conn.go
new file mode 100644
index 000000000..3bb2ba15f
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/conn.go
@@ -0,0 +1,183 @@
+package sftp
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"sync"
+)
+
+// conn implements a bidirectional channel on which client and server
+// connections are multiplexed.
+type conn struct {
+	io.Reader
+	io.WriteCloser
+	// this is the same allocator used in packet manager
+	alloc      *allocator
+	sync.Mutex // used to serialise writes to sendPacket
+}
+
+// the orderID is used in server mode if the allocator is enabled.
+// For the client mode just pass 0.
+// It returns io.EOF if the connection is closed and
+// there are no more packets to read.
+func (c *conn) recvPacket(orderID uint32) (uint8, []byte, error) {
+	return recvPacket(c, c.alloc, orderID)
+}
+
+func (c *conn) sendPacket(m encoding.BinaryMarshaler) error {
+	c.Lock()
+	defer c.Unlock()
+
+	return sendPacket(c, m)
+}
+
+func (c *conn) Close() error {
+	c.Lock()
+	defer c.Unlock()
+	return c.WriteCloser.Close()
+}
+
+type clientConn struct {
+	conn
+	wg sync.WaitGroup
+
+	sync.Mutex                          // protects inflight
+	inflight   map[uint32]chan<- result // outstanding requests
+
+	closed chan struct{}
+	err    error
+}
+
+// Wait blocks until the conn has shut down, and returns the error
+// causing the shutdown. It can be called concurrently from multiple
+// goroutines.
+func (c *clientConn) Wait() error {
+	<-c.closed
+	return c.err
+}
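+
+// The sketch below is an illustrative editorial addition, not part of the
+// upstream library: it watches for connection teardown in the background
+// and reports the terminal error returned by Wait.
+func exampleWatch(c *clientConn) {
+	go func() {
+		if err := c.Wait(); err != nil {
+			fmt.Printf("sftp: connection lost: %v\n", err)
+		}
+	}()
+}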
+
+// Close closes the SFTP session.
+func (c *clientConn) Close() error {
+	defer c.wg.Wait()
+	return c.conn.Close()
+}
+
+// recv continuously reads from the server and forwards responses to the
+// appropriate channel.
+func (c *clientConn) recv() error {
+	defer c.conn.Close()
+
+	for {
+		typ, data, err := c.recvPacket(0)
+		if err != nil {
+			return err
+		}
+		sid, _, err := unmarshalUint32Safe(data)
+		if err != nil {
+			return err
+		}
+
+		ch, ok := c.getChannel(sid)
+		if !ok {
+			// This is an unexpected occurrence. Send the error
+			// back to all listeners so that they terminate
+			// gracefully.
+			return fmt.Errorf("sid not found: %d", sid)
+		}
+
+		ch <- result{typ: typ, data: data}
+	}
+}
+
+func (c *clientConn) putChannel(ch chan<- result, sid uint32) bool {
+	c.Lock()
+	defer c.Unlock()
+
+	select {
+	case <-c.closed:
+		// already closed with broadcastErr, return error on chan.
+		ch <- result{err: ErrSSHFxConnectionLost}
+		return false
+	default:
+	}
+
+	c.inflight[sid] = ch
+	return true
+}
+
+func (c *clientConn) getChannel(sid uint32) (chan<- result, bool) {
+	c.Lock()
+	defer c.Unlock()
+
+	ch, ok := c.inflight[sid]
+	delete(c.inflight, sid)
+
+	return ch, ok
+}
+
+// result captures the result of receiving a packet from the server.
+type result struct {
+	typ  byte
+	data []byte
+	err  error
+}
+
+type idmarshaler interface {
+	id() uint32
+	encoding.BinaryMarshaler
+}
+
+func (c *clientConn) sendPacket(ch chan result, p idmarshaler) (byte, []byte, error) {
+	if cap(ch) < 1 {
+		ch = make(chan result, 1)
+	}
+
+	c.dispatchRequest(ch, p)
+	s := <-ch
+	return s.typ, s.data, s.err
+}
+
+// dispatchRequest should ideally only be called by race-detection tests outside of this file,
+// where you have to ensure two packets are in flight sequentially after each other.
+func (c *clientConn) dispatchRequest(ch chan<- result, p idmarshaler) {
+	sid := p.id()
+
+	if !c.putChannel(ch, sid) {
+		// already closed.
+		return
+	}
+
+	if err := c.conn.sendPacket(p); err != nil {
+		if ch, ok := c.getChannel(sid); ok {
+			ch <- result{err: err}
+		}
+	}
+}
+
+// broadcastErr sends an error to all goroutines waiting for a response.
+func (c *clientConn) broadcastErr(err error) {
+	c.Lock()
+	defer c.Unlock()
+
+	bcastRes := result{err: ErrSSHFxConnectionLost}
+	for sid, ch := range c.inflight {
+		ch <- bcastRes
+
+		// Replace the chan in inflight,
+		// we have hijacked this chan,
+		// and this guarantees always-only-once sending.
+		c.inflight[sid] = make(chan<- result, 1)
+	}
+
+	c.err = err
+	close(c.closed)
+}
+
+type serverConn struct {
+	conn
+}
+
+func (s *serverConn) sendError(id uint32, err error) error {
+	return s.sendPacket(statusFromError(id, err))
+}
diff --git a/vendor/github.com/pkg/sftp/debug.go b/vendor/github.com/pkg/sftp/debug.go
new file mode 100644
index 000000000..f0db14d3a
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/debug.go
@@ -0,0 +1,10 @@
+//go:build debug
+// +build debug
+
+package sftp
+
+import "log"
+
+func debug(fmt string, args ...interface{}) {
+	log.Printf(fmt, args...)
+}
diff --git a/vendor/github.com/pkg/sftp/fuzz.go b/vendor/github.com/pkg/sftp/fuzz.go
new file mode 100644
index 000000000..f2f1fc31c
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/fuzz.go
@@ -0,0 +1,23 @@
+//go:build gofuzz
+// +build gofuzz
+
+package sftp
+
+import "bytes"
+
+type sinkfuzz struct{}
+
+func (*sinkfuzz) Close() error                { return nil }
+func (*sinkfuzz) Write(p []byte) (int, error) { return len(p), nil }
+
+var devnull = &sinkfuzz{}
+
+// To run: go-fuzz-build && go-fuzz
+func Fuzz(data []byte) int {
+	c, err := NewClientPipe(bytes.NewReader(data), devnull)
+	if err != nil {
+		return 0
+	}
+	c.Close()
+	return 1
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go
new file mode 100644
index 000000000..3aec937f0
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go
@@ -0,0 +1,296 @@
+package sshfx
+
+// Attribute-related flags.
+const (
+	AttrSize        = 1 << iota // SSH_FILEXFER_ATTR_SIZE
+	AttrUIDGID                  // SSH_FILEXFER_ATTR_UIDGID
+	AttrPermissions             // SSH_FILEXFER_ATTR_PERMISSIONS
+	AttrACModTime               // SSH_FILEXFER_ATTR_ACMODTIME
+
+	AttrExtended = 1 << 31 // SSH_FILEXFER_ATTR_EXTENDED
+)
+
+// Attributes defines the file attributes type defined in draft-ietf-secsh-filexfer-02
+//
+// Defined in: https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-5
+type Attributes struct {
+	Flags uint32
+
+	// AttrSize
+	Size uint64
+
+	// AttrUIDGID
+	UID uint32
+	GID uint32
+
+	// AttrPermissions
+	Permissions FileMode
+
+	// AttrACModTime
+	ATime uint32
+	MTime uint32
+
+	// AttrExtended
+	ExtendedAttributes []ExtendedAttribute
+}
+
+// GetSize returns the Size field and a bool that is true if and only if the value is valid/defined.
+func (a *Attributes) GetSize() (size uint64, ok bool) {
+	return a.Size, a.Flags&AttrSize != 0
+}
+
+// SetSize is a convenience function that sets the Size field,
+// and marks the field as valid/defined in Flags.
+func (a *Attributes) SetSize(size uint64) {
+	a.Flags |= AttrSize
+	a.Size = size
+}
+
+// GetUIDGID returns the UID and GID fields and a bool that is true if and only if the values are valid/defined.
+func (a *Attributes) GetUIDGID() (uid, gid uint32, ok bool) {
+	return a.UID, a.GID, a.Flags&AttrUIDGID != 0
+}
+
+// SetUIDGID is a convenience function that sets the UID and GID fields,
+// and marks the fields as valid/defined in Flags.
+func (a *Attributes) SetUIDGID(uid, gid uint32) {
+	a.Flags |= AttrUIDGID
+	a.UID = uid
+	a.GID = gid
+}
+
+// GetPermissions returns the Permissions field and a bool that is true if and only if the value is valid/defined.
+func (a *Attributes) GetPermissions() (perms FileMode, ok bool) {
+	return a.Permissions, a.Flags&AttrPermissions != 0
+}
+
+// SetPermissions is a convenience function that sets the Permissions field,
+// and marks the field as valid/defined in Flags.
+func (a *Attributes) SetPermissions(perms FileMode) {
+	a.Flags |= AttrPermissions
+	a.Permissions = perms
+}
+
+// GetACModTime returns the ATime and MTime fields and a bool that is true if and only if the values are valid/defined.
+func (a *Attributes) GetACModTime() (atime, mtime uint32, ok bool) {
+	return a.ATime, a.MTime, a.Flags&AttrACModTime != 0
+}
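+
+// The sketch below is an illustrative editorial addition, not part of the
+// upstream library: only fields whose bit is set in Flags are meaningful,
+// so callers should go through the getters and check the ok result.
+func exampleAttrFlags() {
+	var a Attributes
+	a.SetSize(1024) // also sets AttrSize in a.Flags
+
+	if size, ok := a.GetSize(); ok {
+		_ = size // 1024
+	}
+
+	if _, _, ok := a.GetUIDGID(); !ok {
+		// UID/GID were never set, so their values are undefined.
+	}
+}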
+
+// SetACModTime is a convenience function that sets the ATime and MTime fields,
+// and marks the fields as valid/defined in Flags.
+func (a *Attributes) SetACModTime(atime, mtime uint32) {
+	a.Flags |= AttrACModTime
+	a.ATime = atime
+	a.MTime = mtime
+}
+
+// Len returns the number of bytes a would marshal into.
+func (a *Attributes) Len() int {
+	length := 4
+
+	if a.Flags&AttrSize != 0 {
+		length += 8
+	}
+
+	if a.Flags&AttrUIDGID != 0 {
+		length += 4 + 4
+	}
+
+	if a.Flags&AttrPermissions != 0 {
+		length += 4
+	}
+
+	if a.Flags&AttrACModTime != 0 {
+		length += 4 + 4
+	}
+
+	if a.Flags&AttrExtended != 0 {
+		length += 4
+
+		for _, ext := range a.ExtendedAttributes {
+			length += ext.Len()
+		}
+	}
+
+	return length
+}
+
+// MarshalInto marshals a onto the end of the given Buffer.
+func (a *Attributes) MarshalInto(buf *Buffer) {
+	buf.AppendUint32(a.Flags)
+
+	if a.Flags&AttrSize != 0 {
+		buf.AppendUint64(a.Size)
+	}
+
+	if a.Flags&AttrUIDGID != 0 {
+		buf.AppendUint32(a.UID)
+		buf.AppendUint32(a.GID)
+	}
+
+	if a.Flags&AttrPermissions != 0 {
+		buf.AppendUint32(uint32(a.Permissions))
+	}
+
+	if a.Flags&AttrACModTime != 0 {
+		buf.AppendUint32(a.ATime)
+		buf.AppendUint32(a.MTime)
+	}
+
+	if a.Flags&AttrExtended != 0 {
+		buf.AppendUint32(uint32(len(a.ExtendedAttributes)))
+
+		for _, ext := range a.ExtendedAttributes {
+			ext.MarshalInto(buf)
+		}
+	}
+}
+
+// MarshalBinary returns the binary encoding of a.
+func (a *Attributes) MarshalBinary() ([]byte, error) {
+	buf := NewBuffer(make([]byte, 0, a.Len()))
+	a.MarshalInto(buf)
+	return buf.Bytes(), nil
+}
+
+// UnmarshalFrom unmarshals an Attributes from the given Buffer into a.
+//
+// NOTE: The values of fields not covered by a.Flags are explicitly undefined.
+func (a *Attributes) UnmarshalFrom(buf *Buffer) (err error) {
+	flags := buf.ConsumeUint32()
+
+	return a.XXX_UnmarshalByFlags(flags, buf)
+}
+
+// XXX_UnmarshalByFlags uses the pre-existing a.Flags field to determine which fields to decode.
+// DO NOT USE THIS: it is an anti-corruption function to implement existing internal usage in pkg/sftp.
+// This function is not a part of any compatibility promise.
+func (a *Attributes) XXX_UnmarshalByFlags(flags uint32, buf *Buffer) (err error) {
+	a.Flags = flags
+
+	// Short-circuit dummy attributes.
+	if a.Flags == 0 {
+		return buf.Err
+	}
+
+	if a.Flags&AttrSize != 0 {
+		a.Size = buf.ConsumeUint64()
+	}
+
+	if a.Flags&AttrUIDGID != 0 {
+		a.UID = buf.ConsumeUint32()
+		a.GID = buf.ConsumeUint32()
+	}
+
+	if a.Flags&AttrPermissions != 0 {
+		a.Permissions = FileMode(buf.ConsumeUint32())
+	}
+
+	if a.Flags&AttrACModTime != 0 {
+		a.ATime = buf.ConsumeUint32()
+		a.MTime = buf.ConsumeUint32()
+	}
+
+	if a.Flags&AttrExtended != 0 {
+		count := buf.ConsumeCount()
+
+		a.ExtendedAttributes = make([]ExtendedAttribute, count)
+		for i := range a.ExtendedAttributes {
+			a.ExtendedAttributes[i].UnmarshalFrom(buf)
+		}
+	}
+
+	return buf.Err
+}
+
+// UnmarshalBinary decodes the binary encoding of Attributes into a.
+func (a *Attributes) UnmarshalBinary(data []byte) error {
+	return a.UnmarshalFrom(NewBuffer(data))
+}
+
+// ExtendedAttribute defines the extended file attribute type defined in draft-ietf-secsh-filexfer-02
+//
+// Defined in: https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-5
+type ExtendedAttribute struct {
+	Type string
+	Data string
+}
+
+// Len returns the number of bytes e would marshal into.
+func (e *ExtendedAttribute) Len() int {
+	return 4 + len(e.Type) + 4 + len(e.Data)
+}
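+
+// The sketch below is an illustrative editorial addition, not part of the
+// upstream library: Attributes survive a MarshalBinary/UnmarshalBinary
+// round trip, since Flags records exactly which fields were encoded.
+func exampleAttrRoundTrip(in *Attributes) (*Attributes, error) {
+	data, err := in.MarshalBinary()
+	if err != nil {
+		return nil, err
+	}
+
+	var out Attributes
+	if err := out.UnmarshalBinary(data); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}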
+
+// MarshalInto marshals e onto the end of the given Buffer.
+func (e *ExtendedAttribute) MarshalInto(buf *Buffer) {
+	buf.AppendString(e.Type)
+	buf.AppendString(e.Data)
+}
+
+// MarshalBinary returns the binary encoding of e.
+func (e *ExtendedAttribute) MarshalBinary() ([]byte, error) {
+	buf := NewBuffer(make([]byte, 0, e.Len()))
+	e.MarshalInto(buf)
+	return buf.Bytes(), nil
+}
+
+// UnmarshalFrom unmarshals an ExtendedAttribute from the given Buffer into e.
+func (e *ExtendedAttribute) UnmarshalFrom(buf *Buffer) (err error) {
+	*e = ExtendedAttribute{
+		Type: buf.ConsumeString(),
+		Data: buf.ConsumeString(),
+	}
+
+	return buf.Err
+}
+
+// UnmarshalBinary decodes the binary encoding of ExtendedAttribute into e.
+func (e *ExtendedAttribute) UnmarshalBinary(data []byte) error {
+	return e.UnmarshalFrom(NewBuffer(data))
+}
+
+// NameEntry implements the SSH_FXP_NAME repeated data type from draft-ietf-secsh-filexfer-02
+//
+// This type is incompatible with versions 4 or higher.
+type NameEntry struct {
+	Filename string
+	Longname string
+	Attrs    Attributes
+}
+
+// Len returns the number of bytes e would marshal into.
+func (e *NameEntry) Len() int {
+	return 4 + len(e.Filename) + 4 + len(e.Longname) + e.Attrs.Len()
+}
+
+// MarshalInto marshals e onto the end of the given Buffer.
+func (e *NameEntry) MarshalInto(buf *Buffer) {
+	buf.AppendString(e.Filename)
+	buf.AppendString(e.Longname)
+
+	e.Attrs.MarshalInto(buf)
+}
+
+// MarshalBinary returns the binary encoding of e.
+func (e *NameEntry) MarshalBinary() ([]byte, error) {
+	buf := NewBuffer(make([]byte, 0, e.Len()))
+	e.MarshalInto(buf)
+	return buf.Bytes(), nil
+}
+
+// UnmarshalFrom unmarshals a NameEntry from the given Buffer into e.
+//
+// NOTE: The values of fields not covered by e.Attrs.Flags are explicitly undefined.
+func (e *NameEntry) UnmarshalFrom(buf *Buffer) (err error) {
+	*e = NameEntry{
+		Filename: buf.ConsumeString(),
+		Longname: buf.ConsumeString(),
+	}
+
+	return e.Attrs.UnmarshalFrom(buf)
+}
+
+// UnmarshalBinary decodes the binary encoding of NameEntry into e.
+func (e *NameEntry) UnmarshalBinary(data []byte) error {
+	return e.UnmarshalFrom(NewBuffer(data))
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go
new file mode 100644
index 000000000..bd4783bb8
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go
@@ -0,0 +1,340 @@
+package sshfx
+
+import (
+	"encoding/binary"
+	"errors"
+)
+
+// Various encoding errors.
+var (
+	ErrShortPacket = errors.New("packet too short")
+	ErrLongPacket  = errors.New("packet too long")
+)
+
+// Buffer wraps up the various encoding details of the SSH format.
+//
+// Data types are encoded as per section 4 from https://tools.ietf.org/html/draft-ietf-secsh-architecture-09#page-8
+type Buffer struct {
+	b   []byte
+	off int
+	Err error
+}
+
+// NewBuffer creates and initializes a new buffer using buf as its initial contents.
+// The new buffer takes ownership of buf, and the caller should not use buf after this call.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is sufficient to initialize a Buffer.
+func NewBuffer(buf []byte) *Buffer {
+	return &Buffer{
+		b: buf,
+	}
+}
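+
+// The sketch below is an illustrative editorial addition, not part of the
+// upstream library: Append and Consume calls mirror one another, and
+// decoding errors accumulate in Err instead of being returned per call.
+func exampleBufferRoundTrip() (string, uint32, error) {
+	buf := NewBuffer(nil)
+	buf.AppendString("handle")
+	buf.AppendUint32(42)
+
+	s := buf.ConsumeString() // "handle"
+	v := buf.ConsumeUint32() // 42
+	return s, v, buf.Err
+}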
+
+// NewMarshalBuffer creates a new Buffer ready to start marshaling a Packet into.
+// It preallocates enough space for uint32(length), uint8(type), uint32(request-id) and size more bytes.
+func NewMarshalBuffer(size int) *Buffer {
+	return NewBuffer(make([]byte, 4+1+4+size))
+}
+
+// Bytes returns a slice of length b.Len() holding the unconsumed bytes in the Buffer.
+// The slice is valid for use only until the next buffer modification
+// (that is, only until the next call to an Append or Consume method).
+func (b *Buffer) Bytes() []byte {
+	return b.b[b.off:]
+}
+
+// Len returns the number of unconsumed bytes in the buffer.
+func (b *Buffer) Len() int { return len(b.b) - b.off }
+
+// Cap returns the capacity of the buffer’s underlying byte slice,
+// that is, the total space allocated for the buffer’s data.
+func (b *Buffer) Cap() int { return cap(b.b) }
+
+// Reset resets the buffer to be empty, but it retains the underlying storage for use by future Appends.
+func (b *Buffer) Reset() {
+	*b = Buffer{
+		b: b.b[:0],
+	}
+}
+
+// StartPacket resets and initializes the buffer to be ready to start marshaling a packet into.
+// It truncates the buffer, reserves space for uint32(length), then appends the given packetType and requestID.
+func (b *Buffer) StartPacket(packetType PacketType, requestID uint32) {
+	*b = Buffer{
+		b: append(b.b[:0], make([]byte, 4)...),
+	}
+
+	b.AppendUint8(uint8(packetType))
+	b.AppendUint32(requestID)
+}
+
+// Packet finalizes the packet started from StartPacket.
+// It is expected that this will end the ownership of the underlying byte-slice,
+// and so the returned byte-slices may be reused the same as any other byte-slice;
+// the caller should not use this buffer after this call.
+//
+// It writes the packet body length into the first four bytes of the buffer in network byte order (big endian).
+// The packet body length is the length of this buffer less the 4-byte length itself, plus the length of payload.
+//
+// It is assumed that no Consume methods have been called on this buffer,
+// and so it returns the whole underlying slice.
+func (b *Buffer) Packet(payload []byte) (header, payloadPassThru []byte, err error) {
+	b.PutLength(len(b.b) - 4 + len(payload))
+
+	return b.b, payload, nil
+}
+
+// ConsumeUint8 consumes a single byte from the buffer.
+// If the buffer does not have enough data, it will set Err to ErrShortPacket.
+func (b *Buffer) ConsumeUint8() uint8 {
+	if b.Err != nil {
+		return 0
+	}
+
+	if b.Len() < 1 {
+		b.off = len(b.b)
+		b.Err = ErrShortPacket
+		return 0
+	}
+
+	var v uint8
+	v, b.off = b.b[b.off], b.off+1
+	return v
+}
+
+// AppendUint8 appends a single byte into the buffer.
+func (b *Buffer) AppendUint8(v uint8) {
+	b.b = append(b.b, v)
+}
+
+// ConsumeBool consumes a single byte from the buffer, and returns true if that byte is non-zero.
+// If the buffer does not have enough data, it will set Err to ErrShortPacket.
+func (b *Buffer) ConsumeBool() bool {
+	return b.ConsumeUint8() != 0
+}
+
+// AppendBool appends a single bool into the buffer.
+// It encodes it as a single byte, with false as 0, and true as 1.
+func (b *Buffer) AppendBool(v bool) {
+	if v {
+		b.AppendUint8(1)
+	} else {
+		b.AppendUint8(0)
+	}
+}
+
+// ConsumeUint16 consumes a single uint16 from the buffer, in network byte order (big-endian).
+// If the buffer does not have enough data, it will set Err to ErrShortPacket.
+func (b *Buffer) ConsumeUint16() uint16 {
+	if b.Err != nil {
+		return 0
+	}
+
+	if b.Len() < 2 {
+		b.off = len(b.b)
+		b.Err = ErrShortPacket
+		return 0
+	}
+
+	v := binary.BigEndian.Uint16(b.b[b.off:])
+	b.off += 2
+	return v
+}
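+
+// The sketch below is an illustrative editorial addition, not part of the
+// upstream library: StartPacket reserves the 4-byte length prefix, and
+// Packet back-fills it once the body is complete. The request-id of 7 is
+// arbitrary.
+func exampleFramePacket() ([]byte, error) {
+	buf := NewBuffer(nil)
+	buf.StartPacket(PacketTypeClose, 7)
+	buf.AppendString("handle")
+
+	header, _, err := buf.Packet(nil)
+	return header, err
+}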
+
+// AppendUint16 appends a single uint16 into the buffer, in network byte order (big-endian).
+func (b *Buffer) AppendUint16(v uint16) {
+	b.b = append(b.b,
+		byte(v>>8),
+		byte(v>>0),
+	)
+}
+
+// unmarshalUint32 is used internally to read the packet length.
+// It is unsafe, and so not exported.
+// Even within this package, its use should be avoided.
+func unmarshalUint32(b []byte) uint32 {
+	return binary.BigEndian.Uint32(b[:4])
+}
+
+// ConsumeUint32 consumes a single uint32 from the buffer, in network byte order (big-endian).
+// If the buffer does not have enough data, it will set Err to ErrShortPacket.
+func (b *Buffer) ConsumeUint32() uint32 {
+	if b.Err != nil {
+		return 0
+	}
+
+	if b.Len() < 4 {
+		b.off = len(b.b)
+		b.Err = ErrShortPacket
+		return 0
+	}
+
+	v := binary.BigEndian.Uint32(b.b[b.off:])
+	b.off += 4
+	return v
+}
+
+// AppendUint32 appends a single uint32 into the buffer, in network byte order (big-endian).
+func (b *Buffer) AppendUint32(v uint32) {
+	b.b = append(b.b,
+		byte(v>>24),
+		byte(v>>16),
+		byte(v>>8),
+		byte(v>>0),
+	)
+}
+
+// ConsumeCount consumes a single uint32 count from the buffer, in network byte order (big-endian) as an int.
+// If the buffer does not have enough data, it will set Err to ErrShortPacket.
+func (b *Buffer) ConsumeCount() int {
+	return int(b.ConsumeUint32())
+}
+
+// AppendCount appends a single int length as a uint32 into the buffer, in network byte order (big-endian).
+func (b *Buffer) AppendCount(v int) {
+	b.AppendUint32(uint32(v))
+}
+
+// ConsumeUint64 consumes a single uint64 from the buffer, in network byte order (big-endian).
+// If the buffer does not have enough data, it will set Err to ErrShortPacket.
+func (b *Buffer) ConsumeUint64() uint64 {
+	if b.Err != nil {
+		return 0
+	}
+
+	if b.Len() < 8 {
+		b.off = len(b.b)
+		b.Err = ErrShortPacket
+		return 0
+	}
+
+	v := binary.BigEndian.Uint64(b.b[b.off:])
+	b.off += 8
+	return v
+}
+
+// AppendUint64 appends a single uint64 into the buffer, in network byte order (big-endian).
+func (b *Buffer) AppendUint64(v uint64) {
+	b.b = append(b.b,
+		byte(v>>56),
+		byte(v>>48),
+		byte(v>>40),
+		byte(v>>32),
+		byte(v>>24),
+		byte(v>>16),
+		byte(v>>8),
+		byte(v>>0),
+	)
+}
+
+// ConsumeInt64 consumes a single int64 from the buffer, in network byte order (big-endian) with two’s complement.
+// If the buffer does not have enough data, it will set Err to ErrShortPacket.
+func (b *Buffer) ConsumeInt64() int64 {
+	return int64(b.ConsumeUint64())
+}
+
+// AppendInt64 appends a single int64 into the buffer, in network byte order (big-endian) with two’s complement.
+func (b *Buffer) AppendInt64(v int64) {
+	b.AppendUint64(uint64(v))
+}
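+
+// The sketch below is an illustrative editorial addition, not part of the
+// upstream library: ConsumeByteSlice below returns a view into the buffer,
+// so data that must outlive the buffer should use ConsumeByteSliceCopy.
+func exampleConsumeCopy(buf *Buffer) []byte {
+	// Passing a nil hint allocates a fresh, independent backing slice.
+	return buf.ConsumeByteSliceCopy(nil)
+}
+
+// ConsumeByteSlice consumes a single string of raw binary data from the buffer.
+// A string is a uint32 length, followed by that number of raw bytes.
+// If the buffer does not have enough data, or defines a length larger than available, it will set Err to ErrShortPacket.
+//
+// The returned slice aliases the buffer contents, and is valid only as long as the buffer is not reused
+// (that is, only until the next call to Reset, PutLength, StartPacket, or UnmarshalBinary).
+//
+// In no case will any Consume calls return overlapping slice aliases,
+// and Append calls are guaranteed to not disturb this slice alias.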
+func (b *Buffer) ConsumeByteSlice() []byte { + length := int(b.ConsumeUint32()) + if b.Err != nil { + return nil + } + + if b.Len() < length || length < 0 { + b.off = len(b.b) + b.Err = ErrShortPacket + return nil + } + + v := b.b[b.off:] + if len(v) > length || cap(v) > length { + v = v[:length:length] + } + b.off += int(length) + return v +} + +// ConsumeByteSliceCopy consumes a single string of raw binary data as a copy from the buffer. +// A string is a uint32 length, followed by that number of raw bytes. +// If the buffer does not have enough data, or defines a length larger than available, it will set Err to ErrShortPacket. +// +// The returned slice does not alias any buffer contents, +// and will therefore be valid even if the buffer is later reused. +// +// If hint has sufficient capacity to hold the data, it will be reused and overwritten, +// otherwise a new backing slice will be allocated and returned. +func (b *Buffer) ConsumeByteSliceCopy(hint []byte) []byte { + data := b.ConsumeByteSlice() + + if grow := len(data) - len(hint); grow > 0 { + hint = append(hint, make([]byte, grow)...) + } + + n := copy(hint, data) + hint = hint[:n] + return hint +} + +// AppendByteSlice appends a single string of raw binary data into the buffer. +// A string is a uint32 length, followed by that number of raw bytes. +func (b *Buffer) AppendByteSlice(v []byte) { + b.AppendUint32(uint32(len(v))) + b.b = append(b.b, v...) +} + +// ConsumeString consumes a single string of binary data from the buffer. +// A string is a uint32 length, followed by that number of raw bytes. +// If the buffer does not have enough data, or defines a length larger than available, it will set Err to ErrShortPacket. +// +// NOTE: Go implicitly assumes that strings contain UTF-8 encoded data. +// All caveats on using arbitrary binary data in Go strings applies. +func (b *Buffer) ConsumeString() string { + return string(b.ConsumeByteSlice()) +} + +// AppendString appends a single string of binary data into the buffer. +// A string is a uint32 length, followed by that number of raw bytes. +func (b *Buffer) AppendString(v string) { + b.AppendByteSlice([]byte(v)) +} + +// PutLength writes the given size into the first four bytes of the buffer in network byte order (big endian). +func (b *Buffer) PutLength(size int) { + if len(b.b) < 4 { + b.b = append(b.b, make([]byte, 4-len(b.b))...) + } + + binary.BigEndian.PutUint32(b.b, uint32(size)) +} + +// MarshalBinary returns a clone of the full internal buffer. +func (b *Buffer) MarshalBinary() ([]byte, error) { + clone := make([]byte, len(b.b)) + n := copy(clone, b.b) + return clone[:n], nil +} + +// UnmarshalBinary sets the internal buffer of b to be a clone of data, and zeros the internal offset. +func (b *Buffer) UnmarshalBinary(data []byte) error { + if grow := len(data) - len(b.b); grow > 0 { + b.b = append(b.b, make([]byte, grow)...) + } + + n := copy(b.b, data) + b.b = b.b[:n] + b.off = 0 + return nil +} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go new file mode 100644 index 000000000..f71742530 --- /dev/null +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go @@ -0,0 +1,143 @@ +package sshfx + +import ( + "encoding" + "sync" +) + +// ExtendedData aliases the untyped interface composition of encoding.BinaryMarshaler and encoding.BinaryUnmarshaler. 
+type ExtendedData = interface {
+	encoding.BinaryMarshaler
+	encoding.BinaryUnmarshaler
+}
+
+// ExtendedDataConstructor defines a function that returns a fresh ExtendedData value,
+// ready to decode the request-specific data of a given extension.
+type ExtendedDataConstructor func() ExtendedData
+
+var extendedPacketTypes = struct {
+	mu           sync.RWMutex
+	constructors map[string]ExtendedDataConstructor
+}{
+	constructors: make(map[string]ExtendedDataConstructor),
+}
+
+// RegisterExtendedPacketType defines a specific ExtendedDataConstructor for the given extension string.
+func RegisterExtendedPacketType(extension string, constructor ExtendedDataConstructor) {
+	extendedPacketTypes.mu.Lock()
+	defer extendedPacketTypes.mu.Unlock()
+
+	if _, exist := extendedPacketTypes.constructors[extension]; exist {
+		panic("encoding/ssh/filexfer: multiple registration of extended packet type " + extension)
+	}
+
+	extendedPacketTypes.constructors[extension] = constructor
+}
+
+func newExtendedPacket(extension string) ExtendedData {
+	extendedPacketTypes.mu.RLock()
+	defer extendedPacketTypes.mu.RUnlock()
+
+	if f := extendedPacketTypes.constructors[extension]; f != nil {
+		return f()
+	}
+
+	return new(Buffer)
+}
+
+// ExtendedPacket defines the SSH_FXP_EXTENDED packet.
+type ExtendedPacket struct {
+	ExtendedRequest string
+
+	Data ExtendedData
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *ExtendedPacket) Type() PacketType {
+	return PacketTypeExtended
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+//
+// The Data is marshaled into binary, and returned as the payload.
+func (p *ExtendedPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+	buf := NewBuffer(b)
+	if buf.Cap() < 9 {
+		size := 4 + len(p.ExtendedRequest) // string(extended-request)
+		buf = NewMarshalBuffer(size)
+	}
+
+	buf.StartPacket(PacketTypeExtended, reqid)
+	buf.AppendString(p.ExtendedRequest)
+
+	if p.Data != nil {
+		payload, err = p.Data.MarshalBinary()
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+//
+// If p.Data is nil, and the extension has been registered, a new type will be made from the registration.
+// If the extension has not been registered, then a new Buffer will be allocated.
+// Then the request-specific-data will be unmarshaled from the rest of the buffer.
+func (p *ExtendedPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+	p.ExtendedRequest = buf.ConsumeString()
+	if buf.Err != nil {
+		return buf.Err
+	}
+
+	if p.Data == nil {
+		p.Data = newExtendedPacket(p.ExtendedRequest)
+	}
+
+	return p.Data.UnmarshalBinary(buf.Bytes())
+}
+
+// ExtendedReplyPacket defines the SSH_FXP_EXTENDED_REPLY packet.
+type ExtendedReplyPacket struct {
+	Data ExtendedData
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *ExtendedReplyPacket) Type() PacketType {
+	return PacketTypeExtendedReply
+}
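+
+// The sketch below is an illustrative editorial addition, not part of the
+// upstream library: registering a constructor lets ExtendedPacket decode
+// request-specific data into a concrete type rather than a raw Buffer. The
+// extension name here is purely illustrative.
+func exampleRegisterExtension() {
+	RegisterExtendedPacketType("example@example.com", func() ExtendedData {
+		// A real implementation would return its own packet type.
+		return new(Buffer)
+	})
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+//
+// The Data is marshaled into binary, and returned as the payload.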
+func (p *ExtendedReplyPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + buf = NewMarshalBuffer(0) + } + + buf.StartPacket(PacketTypeExtendedReply, reqid) + + if p.Data != nil { + payload, err = p.Data.MarshalBinary() + if err != nil { + return nil, nil, err + } + } + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +// +// If p.Data is nil, and there is request-specific-data, +// then the request-specific-data will be wrapped in a Buffer and assigned to p.Data. +func (p *ExtendedReplyPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Data == nil { + p.Data = new(Buffer) + } + + return p.Data.UnmarshalBinary(buf.Bytes()) +} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go new file mode 100644 index 000000000..c425780ca --- /dev/null +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go @@ -0,0 +1,43 @@ +package sshfx + +// ExtensionPair defines the extension-pair type defined in draft-ietf-secsh-filexfer-13. +// This type is backwards-compatible with how draft-ietf-secsh-filexfer-02 defines extensions. +// +// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-4.2 +type ExtensionPair struct { + Name string + Data string +} + +// Len returns the number of bytes e would marshal into. +func (e *ExtensionPair) Len() int { + return 4 + len(e.Name) + 4 + len(e.Data) +} + +// MarshalInto marshals e onto the end of the given Buffer. +func (e *ExtensionPair) MarshalInto(buf *Buffer) { + buf.AppendString(e.Name) + buf.AppendString(e.Data) +} + +// MarshalBinary returns e as the binary encoding of e. +func (e *ExtensionPair) MarshalBinary() ([]byte, error) { + buf := NewBuffer(make([]byte, 0, e.Len())) + e.MarshalInto(buf) + return buf.Bytes(), nil +} + +// UnmarshalFrom unmarshals an ExtensionPair from the given Buffer into e. +func (e *ExtensionPair) UnmarshalFrom(buf *Buffer) (err error) { + *e = ExtensionPair{ + Name: buf.ConsumeString(), + Data: buf.ConsumeString(), + } + + return buf.Err +} + +// UnmarshalBinary decodes the binary encoding of ExtensionPair into e. +func (e *ExtensionPair) UnmarshalBinary(data []byte) error { + return e.UnmarshalFrom(NewBuffer(data)) +} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go new file mode 100644 index 000000000..d3009994a --- /dev/null +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go @@ -0,0 +1,54 @@ +// Package sshfx implements the wire encoding for secsh-filexfer as described in https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt +package sshfx + +// PacketMarshaller narrowly defines packets that will only be transmitted. +// +// ExtendedPacket types will often only implement this interface, +// since decoding the whole packet body of an ExtendedPacket can only be done dependent on the ExtendedRequest field. +type PacketMarshaller interface { + // MarshalPacket is the primary intended way to encode a packet. + // The request-id for the packet is set from reqid. + // + // An optional buffer may be given in b. + // If the buffer has a minimum capacity, it shall be truncated and used to marshal the header into. 
+	// The minimum capacity for the packet must be a constant expression, and should be at least 9.
+	//
+	// It shall return the main body of the encoded packet in header,
+	// and may optionally return an additional payload to be written immediately after the header.
+	//
+	// It shall encode in the first 4-bytes of the header the proper length of the rest of the header+payload.
+	MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error)
+}
+
+// Packet defines the behavior of a full generic SFTP packet.
+//
+// InitPacket, and VersionPacket are not generic SFTP packets, and instead implement (Un)MarshalBinary.
+//
+// ExtendedPacket types should not implement this interface,
+// since decoding the whole packet body of an ExtendedPacket can only be done dependent on the ExtendedRequest field.
+type Packet interface {
+	PacketMarshaller
+
+	// Type returns the SSH_FXP_xy value associated with the specific packet.
+	Type() PacketType
+
+	// UnmarshalPacketBody decodes a packet body from the given Buffer.
+	// It is assumed that the common header values of the length, type and request-id have already been consumed.
+	//
+	// Implementations should not alias the given Buffer,
+	// instead they can consider prepopulating an internal buffer as a hint,
+	// and copying into that buffer if it has sufficient length.
+	UnmarshalPacketBody(buf *Buffer) error
+}
+
+// ComposePacket combines the header and payload returned by MarshalPacket
+// into a single byte slice, equivalent to the result of a MarshalBinary call.
+func ComposePacket(header, payload []byte, err error) ([]byte, error) {
+	return append(header, payload...), err
+}
+
+// Default length values, as defined in draft-ietf-secsh-filexfer-02 section 3.
+const (
+	DefaultMaxPacketLength = 34000
+	DefaultMaxDataLength   = 32768
+)
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go
new file mode 100644
index 000000000..9abcbafcb
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go
@@ -0,0 +1,147 @@
+package sshfx
+
+import (
+	"fmt"
+)
+
+// Status defines the SFTP error codes used in SSH_FXP_STATUS response packets.
+type Status uint32
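+
+// The sketch below is an illustrative editorial addition, not part of the
+// upstream library: a Status doubles as an error value, and unknown codes
+// stringify with their numeric value.
+func exampleStatus() string {
+	err := error(StatusPermissionDenied)
+	return fmt.Sprintf("%v, %v", err, Status(9999)) // "SSH_FX_PERMISSION_DENIED, SSH_FX_UNKNOWN(9999)"
+}
+
+// Defines the various SSH_FX_* values.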
+const ( + // see draft-ietf-secsh-filexfer-02 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-7 + StatusOK = Status(iota) + StatusEOF + StatusNoSuchFile + StatusPermissionDenied + StatusFailure + StatusBadMessage + StatusNoConnection + StatusConnectionLost + StatusOPUnsupported + + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-03.txt#section-7 + StatusV4InvalidHandle + StatusV4NoSuchPath + StatusV4FileAlreadyExists + StatusV4WriteProtect + + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-04.txt#section-7 + StatusV4NoMedia + + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-05.txt#section-7 + StatusV5NoSpaceOnFilesystem + StatusV5QuotaExceeded + StatusV5UnknownPrincipal + StatusV5LockConflict + + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-06.txt#section-8 + StatusV6DirNotEmpty + StatusV6NotADirectory + StatusV6InvalidFilename + StatusV6LinkLoop + + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-07.txt#section-8 + StatusV6CannotDelete + StatusV6InvalidParameter + StatusV6FileIsADirectory + StatusV6ByteRangeLockConflict + StatusV6ByteRangeLockRefused + StatusV6DeletePending + + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-08.txt#section-8.1 + StatusV6FileCorrupt + + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-10.txt#section-9.1 + StatusV6OwnerInvalid + StatusV6GroupInvalid + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-9.1 + StatusV6NoMatchingByteRangeLock +) + +func (s Status) Error() string { + return s.String() +} + +// Is returns true if the target is the same Status code, +// or target is a StatusPacket with the same Status code. +func (s Status) Is(target error) bool { + if target, ok := target.(*StatusPacket); ok { + return target.StatusCode == s + } + + return s == target +} + +func (s Status) String() string { + switch s { + case StatusOK: + return "SSH_FX_OK" + case StatusEOF: + return "SSH_FX_EOF" + case StatusNoSuchFile: + return "SSH_FX_NO_SUCH_FILE" + case StatusPermissionDenied: + return "SSH_FX_PERMISSION_DENIED" + case StatusFailure: + return "SSH_FX_FAILURE" + case StatusBadMessage: + return "SSH_FX_BAD_MESSAGE" + case StatusNoConnection: + return "SSH_FX_NO_CONNECTION" + case StatusConnectionLost: + return "SSH_FX_CONNECTION_LOST" + case StatusOPUnsupported: + return "SSH_FX_OP_UNSUPPORTED" + case StatusV4InvalidHandle: + return "SSH_FX_INVALID_HANDLE" + case StatusV4NoSuchPath: + return "SSH_FX_NO_SUCH_PATH" + case StatusV4FileAlreadyExists: + return "SSH_FX_FILE_ALREADY_EXISTS" + case StatusV4WriteProtect: + return "SSH_FX_WRITE_PROTECT" + case StatusV4NoMedia: + return "SSH_FX_NO_MEDIA" + case StatusV5NoSpaceOnFilesystem: + return "SSH_FX_NO_SPACE_ON_FILESYSTEM" + case StatusV5QuotaExceeded: + return "SSH_FX_QUOTA_EXCEEDED" + case StatusV5UnknownPrincipal: + return "SSH_FX_UNKNOWN_PRINCIPAL" + case StatusV5LockConflict: + return "SSH_FX_LOCK_CONFLICT" + case StatusV6DirNotEmpty: + return "SSH_FX_DIR_NOT_EMPTY" + case StatusV6NotADirectory: + return "SSH_FX_NOT_A_DIRECTORY" + case StatusV6InvalidFilename: + return "SSH_FX_INVALID_FILENAME" + case StatusV6LinkLoop: + return "SSH_FX_LINK_LOOP" + case StatusV6CannotDelete: + return "SSH_FX_CANNOT_DELETE" + case StatusV6InvalidParameter: + return "SSH_FX_INVALID_PARAMETER" + case StatusV6FileIsADirectory: + return "SSH_FX_FILE_IS_A_DIRECTORY" + case StatusV6ByteRangeLockConflict: + return "SSH_FX_BYTE_RANGE_LOCK_CONFLICT" + case 
StatusV6ByteRangeLockRefused: + return "SSH_FX_BYTE_RANGE_LOCK_REFUSED" + case StatusV6DeletePending: + return "SSH_FX_DELETE_PENDING" + case StatusV6FileCorrupt: + return "SSH_FX_FILE_CORRUPT" + case StatusV6OwnerInvalid: + return "SSH_FX_OWNER_INVALID" + case StatusV6GroupInvalid: + return "SSH_FX_GROUP_INVALID" + case StatusV6NoMatchingByteRangeLock: + return "SSH_FX_NO_MATCHING_BYTE_RANGE_LOCK" + default: + return fmt.Sprintf("SSH_FX_UNKNOWN(%d)", s) + } +} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go new file mode 100644 index 000000000..780800215 --- /dev/null +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go @@ -0,0 +1,169 @@ +package sshfx + +import ( + "fmt" +) + +// PacketType defines the various SFTP packet types. +type PacketType uint8 + +// Request packet types. +const ( + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3 + PacketTypeInit = PacketType(iota + 1) + PacketTypeVersion + PacketTypeOpen + PacketTypeClose + PacketTypeRead + PacketTypeWrite + PacketTypeLStat + PacketTypeFStat + PacketTypeSetstat + PacketTypeFSetstat + PacketTypeOpenDir + PacketTypeReadDir + PacketTypeRemove + PacketTypeMkdir + PacketTypeRmdir + PacketTypeRealPath + PacketTypeStat + PacketTypeRename + PacketTypeReadLink + PacketTypeSymlink + + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-07.txt#section-3.3 + PacketTypeV6Link + + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-08.txt#section-3.3 + PacketTypeV6Block + PacketTypeV6Unblock +) + +// Response packet types. +const ( + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3 + PacketTypeStatus = PacketType(iota + 101) + PacketTypeHandle + PacketTypeData + PacketTypeName + PacketTypeAttrs +) + +// Extended packet types. 
+const ( + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3 + PacketTypeExtended = PacketType(iota + 200) + PacketTypeExtendedReply +) + +func (f PacketType) String() string { + switch f { + case PacketTypeInit: + return "SSH_FXP_INIT" + case PacketTypeVersion: + return "SSH_FXP_VERSION" + case PacketTypeOpen: + return "SSH_FXP_OPEN" + case PacketTypeClose: + return "SSH_FXP_CLOSE" + case PacketTypeRead: + return "SSH_FXP_READ" + case PacketTypeWrite: + return "SSH_FXP_WRITE" + case PacketTypeLStat: + return "SSH_FXP_LSTAT" + case PacketTypeFStat: + return "SSH_FXP_FSTAT" + case PacketTypeSetstat: + return "SSH_FXP_SETSTAT" + case PacketTypeFSetstat: + return "SSH_FXP_FSETSTAT" + case PacketTypeOpenDir: + return "SSH_FXP_OPENDIR" + case PacketTypeReadDir: + return "SSH_FXP_READDIR" + case PacketTypeRemove: + return "SSH_FXP_REMOVE" + case PacketTypeMkdir: + return "SSH_FXP_MKDIR" + case PacketTypeRmdir: + return "SSH_FXP_RMDIR" + case PacketTypeRealPath: + return "SSH_FXP_REALPATH" + case PacketTypeStat: + return "SSH_FXP_STAT" + case PacketTypeRename: + return "SSH_FXP_RENAME" + case PacketTypeReadLink: + return "SSH_FXP_READLINK" + case PacketTypeSymlink: + return "SSH_FXP_SYMLINK" + case PacketTypeV6Link: + return "SSH_FXP_LINK" + case PacketTypeV6Block: + return "SSH_FXP_BLOCK" + case PacketTypeV6Unblock: + return "SSH_FXP_UNBLOCK" + case PacketTypeStatus: + return "SSH_FXP_STATUS" + case PacketTypeHandle: + return "SSH_FXP_HANDLE" + case PacketTypeData: + return "SSH_FXP_DATA" + case PacketTypeName: + return "SSH_FXP_NAME" + case PacketTypeAttrs: + return "SSH_FXP_ATTRS" + case PacketTypeExtended: + return "SSH_FXP_EXTENDED" + case PacketTypeExtendedReply: + return "SSH_FXP_EXTENDED_REPLY" + default: + return fmt.Sprintf("SSH_FXP_UNKNOWN(%d)", f) + } +} + +func newPacketFromType(typ PacketType) (Packet, error) { + switch typ { + case PacketTypeOpen: + return new(OpenPacket), nil + case PacketTypeClose: + return new(ClosePacket), nil + case PacketTypeRead: + return new(ReadPacket), nil + case PacketTypeWrite: + return new(WritePacket), nil + case PacketTypeLStat: + return new(LStatPacket), nil + case PacketTypeFStat: + return new(FStatPacket), nil + case PacketTypeSetstat: + return new(SetstatPacket), nil + case PacketTypeFSetstat: + return new(FSetstatPacket), nil + case PacketTypeOpenDir: + return new(OpenDirPacket), nil + case PacketTypeReadDir: + return new(ReadDirPacket), nil + case PacketTypeRemove: + return new(RemovePacket), nil + case PacketTypeMkdir: + return new(MkdirPacket), nil + case PacketTypeRmdir: + return new(RmdirPacket), nil + case PacketTypeRealPath: + return new(RealPathPacket), nil + case PacketTypeStat: + return new(StatPacket), nil + case PacketTypeRename: + return new(RenamePacket), nil + case PacketTypeReadLink: + return new(ReadLinkPacket), nil + case PacketTypeSymlink: + return new(SymlinkPacket), nil + case PacketTypeExtended: + return new(ExtendedPacket), nil + default: + return nil, fmt.Errorf("unexpected request packet type: %v", typ) + } +} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go new file mode 100644 index 000000000..44594acff --- /dev/null +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go @@ -0,0 +1,230 @@ +package sshfx + +// ClosePacket defines the SSH_FXP_CLOSE packet. 
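
Editor's note: the three const blocks above place request types at 1..23, response types at 101..105, and extended packets at 200..201 on the wire, following the filexfer drafts. A small illustrative helper under that assumption (classify is hypothetical, written as if inside the sshfx package):

package sshfx

// classify is a hypothetical helper reflecting the const blocks above:
// requests start at PacketTypeInit (1), responses at PacketTypeStatus (101),
// and extended packets at PacketTypeExtended (200).
func classify(t PacketType) string {
	switch {
	case t >= PacketTypeInit && t <= PacketTypeV6Unblock:
		return "request"
	case t >= PacketTypeStatus && t <= PacketTypeAttrs:
		return "response"
	case t == PacketTypeExtended || t == PacketTypeExtendedReply:
		return "extended"
	default:
		return "unknown"
	}
}

For example, classify(PacketTypeOpen) yields "request", while the String method above gives the protocol name "SSH_FXP_OPEN".
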
+type ClosePacket struct { + Handle string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *ClosePacket) Type() PacketType { + return PacketTypeClose +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *ClosePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Handle) // string(handle) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeClose, reqid) + buf.AppendString(p.Handle) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *ClosePacket) UnmarshalPacketBody(buf *Buffer) (err error) { + *p = ClosePacket{ + Handle: buf.ConsumeString(), + } + + return buf.Err +} + +// ReadPacket defines the SSH_FXP_READ packet. +type ReadPacket struct { + Handle string + Offset uint64 + Length uint32 +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *ReadPacket) Type() PacketType { + return PacketTypeRead +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *ReadPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + // string(handle) + uint64(offset) + uint32(len) + size := 4 + len(p.Handle) + 8 + 4 + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeRead, reqid) + buf.AppendString(p.Handle) + buf.AppendUint64(p.Offset) + buf.AppendUint32(p.Length) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *ReadPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + *p = ReadPacket{ + Handle: buf.ConsumeString(), + Offset: buf.ConsumeUint64(), + Length: buf.ConsumeUint32(), + } + + return buf.Err +} + +// WritePacket defines the SSH_FXP_WRITE packet. +type WritePacket struct { + Handle string + Offset uint64 + Data []byte +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *WritePacket) Type() PacketType { + return PacketTypeWrite +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *WritePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + // string(handle) + uint64(offset) + uint32(len(data)); data content in payload + size := 4 + len(p.Handle) + 8 + 4 + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeWrite, reqid) + buf.AppendString(p.Handle) + buf.AppendUint64(p.Offset) + buf.AppendUint32(uint32(len(p.Data))) + + return buf.Packet(p.Data) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +// +// If p.Data is already populated, and of sufficient length to hold the data, +// then this will copy the data into that byte slice. +// +// If p.Data has a length insufficient to hold the data, +// then this will make a new slice of sufficient length, and copy the data into that. +// +// This means this _does not_ alias any of the data buffer that is passed in. 
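
Editor's note: the (header, payload) split returned by MarshalPacket is what lets SSH_FXP_WRITE put p.Data on the wire without copying it into the marshal buffer. A minimal sender sketch built on that split; writePacketTo is illustrative only, not part of the vendored API:

package sshfx

import "io"

// writePacketTo sends any Packet: the header holds the framing and fixed
// fields, while the payload (p.Data for a WritePacket) is written directly
// from the caller's buffer with no intermediate copy.
func writePacketTo(w io.Writer, reqid uint32, p Packet, scratch []byte) error {
	header, payload, err := p.MarshalPacket(reqid, scratch)
	if err != nil {
		return err
	}
	if _, err := w.Write(header); err != nil {
		return err
	}
	if len(payload) > 0 {
		_, err = w.Write(payload)
	}
	return err
}
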
+func (p *WritePacket) UnmarshalPacketBody(buf *Buffer) (err error) { + *p = WritePacket{ + Handle: buf.ConsumeString(), + Offset: buf.ConsumeUint64(), + Data: buf.ConsumeByteSliceCopy(p.Data), + } + + return buf.Err +} + +// FStatPacket defines the SSH_FXP_FSTAT packet. +type FStatPacket struct { + Handle string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *FStatPacket) Type() PacketType { + return PacketTypeFStat +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *FStatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Handle) // string(handle) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeFStat, reqid) + buf.AppendString(p.Handle) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *FStatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + *p = FStatPacket{ + Handle: buf.ConsumeString(), + } + + return buf.Err +} + +// FSetstatPacket defines the SSH_FXP_FSETSTAT packet. +type FSetstatPacket struct { + Handle string + Attrs Attributes +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *FSetstatPacket) Type() PacketType { + return PacketTypeFSetstat +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *FSetstatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Handle) + p.Attrs.Len() // string(handle) + ATTRS(attrs) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeFSetstat, reqid) + buf.AppendString(p.Handle) + + p.Attrs.MarshalInto(buf) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *FSetstatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + *p = FSetstatPacket{ + Handle: buf.ConsumeString(), + } + + return p.Attrs.UnmarshalFrom(buf) +} + +// ReadDirPacket defines the SSH_FXP_READDIR packet. +type ReadDirPacket struct { + Handle string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *ReadDirPacket) Type() PacketType { + return PacketTypeReadDir +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *ReadDirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Handle) // string(handle) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeReadDir, reqid) + buf.AppendString(p.Handle) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *ReadDirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + *p = ReadDirPacket{ + Handle: buf.ConsumeString(), + } + + return buf.Err +} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go new file mode 100644 index 000000000..c553ee2e2 --- /dev/null +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go @@ -0,0 +1,99 @@ +package sshfx + +// InitPacket defines the SSH_FXP_INIT packet. 
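
Editor's note: the copy-into-p.Data behaviour documented for WritePacket.UnmarshalPacketBody above enables a simple reuse pattern: keep one WritePacket alive and its Data slice is recycled whenever its capacity suffices. A sketch under those assumptions (bodies and process are hypothetical; each body is a packet body with the request-id already consumed):

package sshfx

// consumeWrites decodes a stream of SSH_FXP_WRITE bodies, reusing a single
// WritePacket; because UnmarshalPacketBody copies into wp.Data, the decoded
// data stays valid even if the body buffers are recycled by the caller.
func consumeWrites(bodies [][]byte, process func([]byte)) error {
	var wp WritePacket
	for _, body := range bodies {
		if err := wp.UnmarshalPacketBody(NewBuffer(body)); err != nil {
			return err
		}
		process(wp.Data)
	}
	return nil
}
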
+type InitPacket struct { + Version uint32 + Extensions []*ExtensionPair +} + +// MarshalBinary returns p as the binary encoding of p. +func (p *InitPacket) MarshalBinary() ([]byte, error) { + size := 1 + 4 // byte(type) + uint32(version) + + for _, ext := range p.Extensions { + size += ext.Len() + } + + b := NewBuffer(make([]byte, 4, 4+size)) + b.AppendUint8(uint8(PacketTypeInit)) + b.AppendUint32(p.Version) + + for _, ext := range p.Extensions { + ext.MarshalInto(b) + } + + b.PutLength(size) + + return b.Bytes(), nil +} + +// UnmarshalBinary unmarshals a full raw packet out of the given data. +// It is assumed that the uint32(length) has already been consumed to receive the data. +// It is also assumed that the uint8(type) has already been consumed to which packet to unmarshal into. +func (p *InitPacket) UnmarshalBinary(data []byte) (err error) { + buf := NewBuffer(data) + + *p = InitPacket{ + Version: buf.ConsumeUint32(), + } + + for buf.Len() > 0 { + var ext ExtensionPair + if err := ext.UnmarshalFrom(buf); err != nil { + return err + } + + p.Extensions = append(p.Extensions, &ext) + } + + return buf.Err +} + +// VersionPacket defines the SSH_FXP_VERSION packet. +type VersionPacket struct { + Version uint32 + Extensions []*ExtensionPair +} + +// MarshalBinary returns p as the binary encoding of p. +func (p *VersionPacket) MarshalBinary() ([]byte, error) { + size := 1 + 4 // byte(type) + uint32(version) + + for _, ext := range p.Extensions { + size += ext.Len() + } + + b := NewBuffer(make([]byte, 4, 4+size)) + b.AppendUint8(uint8(PacketTypeVersion)) + b.AppendUint32(p.Version) + + for _, ext := range p.Extensions { + ext.MarshalInto(b) + } + + b.PutLength(size) + + return b.Bytes(), nil +} + +// UnmarshalBinary unmarshals a full raw packet out of the given data. +// It is assumed that the uint32(length) has already been consumed to receive the data. +// It is also assumed that the uint8(type) has already been consumed to which packet to unmarshal into. +func (p *VersionPacket) UnmarshalBinary(data []byte) (err error) { + buf := NewBuffer(data) + + *p = VersionPacket{ + Version: buf.ConsumeUint32(), + } + + for buf.Len() > 0 { + var ext ExtensionPair + if err := ext.UnmarshalFrom(buf); err != nil { + return err + } + + p.Extensions = append(p.Extensions, &ext) + } + + return nil +} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go new file mode 100644 index 000000000..896ba16e5 --- /dev/null +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go @@ -0,0 +1,86 @@ +package sshfx + +// SSH_FXF_* flags. +const ( + FlagRead = 1 << iota // SSH_FXF_READ + FlagWrite // SSH_FXF_WRITE + FlagAppend // SSH_FXF_APPEND + FlagCreate // SSH_FXF_CREAT + FlagTruncate // SSH_FXF_TRUNC + FlagExclusive // SSH_FXF_EXCL +) + +// OpenPacket defines the SSH_FXP_OPEN packet. +type OpenPacket struct { + Filename string + PFlags uint32 + Attrs Attributes +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *OpenPacket) Type() PacketType { + return PacketTypeOpen +} + +// MarshalPacket returns p as a two-part binary encoding of p. 
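
Editor's note: InitPacket and VersionPacket above are the two halves of the SFTP handshake. A hypothetical sketch of version negotiation, assuming the usual draft semantics (the server answers with the lower of the two version numbers); negotiate is not part of the vendored API:

package sshfx

// negotiate builds the SSH_FXP_VERSION reply to a client's SSH_FXP_INIT:
// the agreed version is the minimum of what each side supports, and the
// server advertises its extensions alongside it.
func negotiate(init *InitPacket, serverVersion uint32, exts []*ExtensionPair) *VersionPacket {
	v := serverVersion
	if init.Version < v {
		v = init.Version
	}
	return &VersionPacket{Version: v, Extensions: exts}
}
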
+func (p *OpenPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + // string(filename) + uint32(pflags) + ATTRS(attrs) + size := 4 + len(p.Filename) + 4 + p.Attrs.Len() + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeOpen, reqid) + buf.AppendString(p.Filename) + buf.AppendUint32(p.PFlags) + + p.Attrs.MarshalInto(buf) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *OpenPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + *p = OpenPacket{ + Filename: buf.ConsumeString(), + PFlags: buf.ConsumeUint32(), + } + + return p.Attrs.UnmarshalFrom(buf) +} + +// OpenDirPacket defines the SSH_FXP_OPENDIR packet. +type OpenDirPacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *OpenDirPacket) Type() PacketType { + return PacketTypeOpenDir +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *OpenDirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeOpenDir, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *OpenDirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + *p = OpenDirPacket{ + Path: buf.ConsumeString(), + } + + return buf.Err +} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go new file mode 100644 index 000000000..fdf65d057 --- /dev/null +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go @@ -0,0 +1,273 @@ +package sshfx + +import ( + "errors" + "io" +) + +// smallBufferSize is an initial allocation minimal capacity. +const smallBufferSize = 64 + +// RawPacket implements the general packet format from draft-ietf-secsh-filexfer-02 +// +// RawPacket is intended for use in clients receiving responses, +// where a response will be expected to be of a limited number of types, +// and unmarshaling unknown/unexpected response packets is unnecessary. +// +// For servers expecting to receive arbitrary request packet types, +// use RequestPacket. +// +// Defined in https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3 +type RawPacket struct { + PacketType PacketType + RequestID uint32 + + Data Buffer +} + +// Type returns the Type field defining the SSH_FXP_xy type for this packet. +func (p *RawPacket) Type() PacketType { + return p.PacketType +} + +// Reset clears the pointers and reference-semantic variables of RawPacket, +// releasing underlying resources, and making them and the RawPacket suitable to be reused, +// so long as no other references have been kept. +func (p *RawPacket) Reset() { + p.Data = Buffer{} +} + +// MarshalPacket returns p as a two-part binary encoding of p. +// +// The internal p.RequestID is overridden by the reqid argument. 
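
Editor's note: clients typically derive the SSH_FXF_* pflags above from os.OpenFile-style flags. The mapping below follows the draft's intent but is an illustrative assumption, not code from this package:

package sshfx

import "os"

// toPFlags is a hypothetical translation from os.O_* flags to SSH_FXF_* bits.
func toPFlags(flag int) uint32 {
	var p uint32
	switch flag & (os.O_WRONLY | os.O_RDWR) {
	case 0: // os.O_RDONLY is zero
		p = FlagRead
	case os.O_WRONLY:
		p = FlagWrite
	case os.O_RDWR:
		p = FlagRead | FlagWrite
	}
	if flag&os.O_APPEND != 0 {
		p |= FlagAppend
	}
	if flag&os.O_CREATE != 0 {
		p |= FlagCreate
	}
	if flag&os.O_TRUNC != 0 {
		p |= FlagTruncate
	}
	if flag&os.O_EXCL != 0 {
		p |= FlagExclusive
	}
	return p
}
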
+func (p *RawPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+	buf := NewBuffer(b)
+	if buf.Cap() < 9 {
+		buf = NewMarshalBuffer(0)
+	}
+
+	buf.StartPacket(p.PacketType, reqid)
+
+	return buf.Packet(p.Data.Bytes())
+}
+
+// MarshalBinary returns p as the binary encoding of p.
+//
+// This is a convenience implementation primarily intended for tests,
+// because it is inefficient with allocations.
+func (p *RawPacket) MarshalBinary() ([]byte, error) {
+	return ComposePacket(p.MarshalPacket(p.RequestID, nil))
+}
+
+// UnmarshalFrom decodes a RawPacket from the given Buffer into p.
+//
+// The Data field will alias the passed in Buffer,
+// so the buffer passed in should not be reused before RawPacket.Reset().
+func (p *RawPacket) UnmarshalFrom(buf *Buffer) error {
+	*p = RawPacket{
+		PacketType: PacketType(buf.ConsumeUint8()),
+		RequestID:  buf.ConsumeUint32(),
+	}
+
+	p.Data = *buf
+
+	return buf.Err
+}
+
+// UnmarshalBinary decodes a full raw packet out of the given data.
+// It is assumed that the uint32(length) has already been consumed to receive the data.
+//
+// This is a convenience implementation primarily intended for tests,
+// because this must clone the given data byte slice,
+// as Data is not allowed to alias any part of the data byte slice.
+func (p *RawPacket) UnmarshalBinary(data []byte) error {
+	clone := make([]byte, len(data))
+	n := copy(clone, data)
+	return p.UnmarshalFrom(NewBuffer(clone[:n]))
+}
+
+// readPacket reads a uint32 length-prefixed binary data packet from r,
+// using the given byte slice as a backing array.
+//
+// If the packet length read from r is bigger than maxPacketLength,
+// or greater than math.MaxInt32 on a 32-bit implementation,
+// then an `ErrLongPacket` error will be returned.
+//
+// If the given byte slice is insufficient to hold the packet,
+// then it will be extended to fill the packet size.
+func readPacket(r io.Reader, b []byte, maxPacketLength uint32) ([]byte, error) {
+	if cap(b) < 4 {
+		// We will need to allocate our own buffer just for reading the packet length.
+
+		// However, we don’t really want to allocate an extremely narrow buffer (4 bytes),
+		// and cause unnecessary allocation churn from both length reads and small packet reads,
+		// so we use smallBufferSize from the bytes package as a reasonable guess.
+
+		// But if callers really do want to force narrow throw-away allocation of every packet body,
+		// they can do so with a buffer of capacity 4.
+		b = make([]byte, smallBufferSize)
+	}
+
+	if _, err := io.ReadFull(r, b[:4]); err != nil {
+		return nil, err
+	}
+
+	length := unmarshalUint32(b)
+	if int(length) < 5 {
+		// Must have at least uint8(type) and uint32(request-id)
+
+		if int(length) < 0 {
+			// Only possible when strconv.IntSize == 32,
+			// the packet length is longer than math.MaxInt32,
+			// and thus longer than any possible slice.
+			return nil, ErrLongPacket
+		}
+
+		return nil, ErrShortPacket
+	}
+	if length > maxPacketLength {
+		return nil, ErrLongPacket
+	}
+
+	if int(length) > cap(b) {
+		// We know int(length) must be positive, because of tests above.
+		b = make([]byte, length)
+	}
+
+	n, err := io.ReadFull(r, b[:length])
+	return b[:n], err
+}
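
Editor's note: a sketch of how a client might drive readPacket through the ReadFrom wrapper that follows: one RawPacket and one backing buffer are reused across packets, with maxPacketLength bounding allocation. readResponses is hypothetical, and the 32 KiB / 256 KiB figures are illustrative, not values from the package:

package sshfx

import "io"

// readResponses reuses a single RawPacket and backing buffer; packets
// larger than the buffer get a one-off throw-away slice, everything
// else reuses buf, and anything over maxPacketLength is rejected.
func readResponses(r io.Reader, handle func(*RawPacket) error) error {
	var p RawPacket
	buf := make([]byte, 32*1024)
	for {
		if err := p.ReadFrom(r, buf, 256*1024); err != nil {
			return err
		}
		if err := handle(&p); err != nil {
			return err
		}
		p.Reset() // drop the alias into buf before the next read
	}
}
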
+// ReadFrom provides a simple functional packet reader,
+// using the given byte slice as a backing array.
+//
+// To protect against potential denial of service attacks,
+// if the read packet length is longer than maxPacketLength,
+// then no packet data will be read, and ErrLongPacket will be returned.
+// (On 32-bit int architectures, all packets >= 2^31 in length
+// will return ErrLongPacket regardless of maxPacketLength.)
+//
+// If the read packet length is longer than cap(b),
+// then a throw-away slice will be allocated to meet the exact packet length.
+// This can be used to limit the length of reused buffers,
+// while still allowing reception of occasional large packets.
+//
+// The Data field may alias the passed in byte slice,
+// so the byte slice passed in should not be reused before RawPacket.Reset().
+func (p *RawPacket) ReadFrom(r io.Reader, b []byte, maxPacketLength uint32) error {
+	b, err := readPacket(r, b, maxPacketLength)
+	if err != nil {
+		return err
+	}
+
+	return p.UnmarshalFrom(NewBuffer(b))
+}
+
+// RequestPacket implements the general packet format from draft-ietf-secsh-filexfer-02,
+// but also automatically decodes/encodes valid request packets (2 < type < 100 || type == 200).
+//
+// RequestPacket is intended for use in servers receiving requests,
+// where any arbitrary request may be received, and so decoding them automatically
+// is useful.
+//
+// For clients expecting to receive specific response packet types,
+// where automatic unmarshaling of the packet body does not make sense,
+// use RawPacket.
+//
+// Defined in https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3
+type RequestPacket struct {
+	RequestID uint32
+
+	Request Packet
+}
+
+// Type returns the SSH_FXP_xy value associated with the underlying packet.
+func (p *RequestPacket) Type() PacketType {
+	return p.Request.Type()
+}
+
+// Reset clears the pointers and reference-semantic variables in RequestPacket,
+// releasing underlying resources, and making them and the RequestPacket suitable to be reused,
+// so long as no other references have been kept.
+func (p *RequestPacket) Reset() {
+	p.Request = nil
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+//
+// The internal p.RequestID is overridden by the reqid argument.
+func (p *RequestPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+	if p.Request == nil {
+		return nil, nil, errors.New("empty request packet")
+	}
+
+	return p.Request.MarshalPacket(reqid, b)
+}
+
+// MarshalBinary returns p as the binary encoding of p.
+//
+// This is a convenience implementation primarily intended for tests,
+// because it is inefficient with allocations.
+func (p *RequestPacket) MarshalBinary() ([]byte, error) {
+	return ComposePacket(p.MarshalPacket(p.RequestID, nil))
+}
+
+// UnmarshalFrom decodes a RequestPacket from the given Buffer into p.
+//
+// The Request field may alias the passed in Buffer (e.g. SSH_FXP_WRITE),
+// so the buffer passed in should not be reused before RequestPacket.Reset().
+func (p *RequestPacket) UnmarshalFrom(buf *Buffer) error {
+	typ := PacketType(buf.ConsumeUint8())
+	if buf.Err != nil {
+		return buf.Err
+	}
+
+	req, err := newPacketFromType(typ)
+	if err != nil {
+		return err
+	}
+
+	*p = RequestPacket{
+		RequestID: buf.ConsumeUint32(),
+		Request:   req,
+	}
+
+	return p.Request.UnmarshalPacketBody(buf)
+}
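
Editor's note: with UnmarshalFrom having already materialized a concrete request via newPacketFromType, a server-side handler reduces to a type switch on p.Request. A hypothetical skeleton; dispatch and its cases are illustrative stand-ins for real filesystem operations:

package sshfx

// dispatch routes a decoded request to a handler; unsupported types are
// reported with StatusOPUnsupported, which doubles as an error value.
func dispatch(p *RequestPacket) error {
	switch req := p.Request.(type) {
	case *OpenPacket:
		_ = req.Filename // open req.Filename according to req.PFlags
	case *ReadPacket:
		_ = req.Handle // read req.Length bytes at req.Offset
	case *ClosePacket:
		_ = req.Handle // release the handle
	default:
		return StatusOPUnsupported
	}
	return nil
}
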
+// UnmarshalBinary decodes a full request packet out of the given data.
+// It is assumed that the uint32(length) has already been consumed to receive the data.
+//
+// This is a convenience implementation primarily intended for tests,
+// because this must clone the given data byte slice,
+// as Request is not allowed to alias any part of the data byte slice.
+func (p *RequestPacket) UnmarshalBinary(data []byte) error {
+	clone := make([]byte, len(data))
+	n := copy(clone, data)
+	return p.UnmarshalFrom(NewBuffer(clone[:n]))
+}
+
+// ReadFrom provides a simple functional packet reader,
+// using the given byte slice as a backing array.
+//
+// To protect against potential denial of service attacks,
+// if the read packet length is longer than maxPacketLength,
+// then no packet data will be read, and ErrLongPacket will be returned.
+// (On 32-bit int architectures, all packets >= 2^31 in length
+// will return ErrLongPacket regardless of maxPacketLength.)
+//
+// If the read packet length is longer than cap(b),
+// then a throw-away slice will be allocated to meet the exact packet length.
+// This can be used to limit the length of reused buffers,
+// while still allowing reception of occasional large packets.
+//
+// The Request field may alias the passed in byte slice,
+// so the byte slice passed in should not be reused before RequestPacket.Reset().
+func (p *RequestPacket) ReadFrom(r io.Reader, b []byte, maxPacketLength uint32) error {
+	b, err := readPacket(r, b, maxPacketLength)
+	if err != nil {
+		return err
+	}
+
+	return p.UnmarshalFrom(NewBuffer(b))
+}
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go
new file mode 100644
index 000000000..0180326f6
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go
@@ -0,0 +1,362 @@
+package sshfx
+
+// LStatPacket defines the SSH_FXP_LSTAT packet.
+type LStatPacket struct {
+	Path string
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *LStatPacket) Type() PacketType {
+	return PacketTypeLStat
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *LStatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+	buf := NewBuffer(b)
+	if buf.Cap() < 9 {
+		size := 4 + len(p.Path) // string(path)
+		buf = NewMarshalBuffer(size)
+	}
+
+	buf.StartPacket(PacketTypeLStat, reqid)
+	buf.AppendString(p.Path)
+
+	return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *LStatPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
+	*p = LStatPacket{
+		Path: buf.ConsumeString(),
+	}
+
+	return buf.Err
+}
+
+// SetstatPacket defines the SSH_FXP_SETSTAT packet.
+type SetstatPacket struct {
+	Path  string
+	Attrs Attributes
+}
+
+// Type returns the SSH_FXP_xy value associated with this packet type.
+func (p *SetstatPacket) Type() PacketType {
+	return PacketTypeSetstat
+}
+
+// MarshalPacket returns p as a two-part binary encoding of p.
+func (p *SetstatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
+	buf := NewBuffer(b)
+	if buf.Cap() < 9 {
+		size := 4 + len(p.Path) + p.Attrs.Len() // string(path) + ATTRS(attrs)
+		buf = NewMarshalBuffer(size)
+	}
+
+	buf.StartPacket(PacketTypeSetstat, reqid)
+	buf.AppendString(p.Path)
+
+	p.Attrs.MarshalInto(buf)
+
+	return buf.Packet(payload)
+}
+
+// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
+// It is assumed that the uint32(request-id) has already been consumed.
+func (p *SetstatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + *p = SetstatPacket{ + Path: buf.ConsumeString(), + } + + return p.Attrs.UnmarshalFrom(buf) +} + +// RemovePacket defines the SSH_FXP_REMOVE packet. +type RemovePacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *RemovePacket) Type() PacketType { + return PacketTypeRemove +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *RemovePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeRemove, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *RemovePacket) UnmarshalPacketBody(buf *Buffer) (err error) { + *p = RemovePacket{ + Path: buf.ConsumeString(), + } + + return buf.Err +} + +// MkdirPacket defines the SSH_FXP_MKDIR packet. +type MkdirPacket struct { + Path string + Attrs Attributes +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *MkdirPacket) Type() PacketType { + return PacketTypeMkdir +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *MkdirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) + p.Attrs.Len() // string(path) + ATTRS(attrs) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeMkdir, reqid) + buf.AppendString(p.Path) + + p.Attrs.MarshalInto(buf) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *MkdirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + *p = MkdirPacket{ + Path: buf.ConsumeString(), + } + + return p.Attrs.UnmarshalFrom(buf) +} + +// RmdirPacket defines the SSH_FXP_RMDIR packet. +type RmdirPacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *RmdirPacket) Type() PacketType { + return PacketTypeRmdir +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *RmdirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeRmdir, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *RmdirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + *p = RmdirPacket{ + Path: buf.ConsumeString(), + } + + return buf.Err +} + +// RealPathPacket defines the SSH_FXP_REALPATH packet. +type RealPathPacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *RealPathPacket) Type() PacketType { + return PacketTypeRealPath +} + +// MarshalPacket returns p as a two-part binary encoding of p. 
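
Editor's note: LStat, Setstat, Remove, Mkdir, Rmdir, RealPath and Stat all share the same path-plus-optional-attrs shape, so callers can treat them uniformly through the Packet interface. An illustrative sketch (marshalPathRequests, the paths, and the starting request id are all made up):

package sshfx

// marshalPathRequests frames a few path-only requests; ComposePacket joins
// each (header, payload, err) triple into one wire-ready byte slice.
func marshalPathRequests(reqid uint32) ([][]byte, error) {
	reqs := []Packet{
		&LStatPacket{Path: "/tmp/a"},
		&RemovePacket{Path: "/tmp/a"},
		&RmdirPacket{Path: "/tmp/dir"},
	}
	out := make([][]byte, 0, len(reqs))
	for _, r := range reqs {
		b, err := ComposePacket(r.MarshalPacket(reqid, nil))
		if err != nil {
			return nil, err
		}
		out = append(out, b)
		reqid++
	}
	return out, nil
}
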
+func (p *RealPathPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeRealPath, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *RealPathPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + *p = RealPathPacket{ + Path: buf.ConsumeString(), + } + + return buf.Err +} + +// StatPacket defines the SSH_FXP_STAT packet. +type StatPacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *StatPacket) Type() PacketType { + return PacketTypeStat +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *StatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeStat, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *StatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + *p = StatPacket{ + Path: buf.ConsumeString(), + } + + return buf.Err +} + +// RenamePacket defines the SSH_FXP_RENAME packet. +type RenamePacket struct { + OldPath string + NewPath string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *RenamePacket) Type() PacketType { + return PacketTypeRename +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *RenamePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + // string(oldpath) + string(newpath) + size := 4 + len(p.OldPath) + 4 + len(p.NewPath) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeRename, reqid) + buf.AppendString(p.OldPath) + buf.AppendString(p.NewPath) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *RenamePacket) UnmarshalPacketBody(buf *Buffer) (err error) { + *p = RenamePacket{ + OldPath: buf.ConsumeString(), + NewPath: buf.ConsumeString(), + } + + return buf.Err +} + +// ReadLinkPacket defines the SSH_FXP_READLINK packet. +type ReadLinkPacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *ReadLinkPacket) Type() PacketType { + return PacketTypeReadLink +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *ReadLinkPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeReadLink, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. 
+func (p *ReadLinkPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + *p = ReadLinkPacket{ + Path: buf.ConsumeString(), + } + + return buf.Err +} + +// SymlinkPacket defines the SSH_FXP_SYMLINK packet. +// +// The order of the arguments to the SSH_FXP_SYMLINK method was inadvertently reversed. +// Unfortunately, the reversal was not noticed until the server was widely deployed. +// Covered in Section 4.1 of https://github.com/openssh/openssh-portable/blob/master/PROTOCOL +type SymlinkPacket struct { + LinkPath string + TargetPath string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *SymlinkPacket) Type() PacketType { + return PacketTypeSymlink +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *SymlinkPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + // string(targetpath) + string(linkpath) + size := 4 + len(p.TargetPath) + 4 + len(p.LinkPath) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeSymlink, reqid) + + // Arguments were inadvertently reversed. + buf.AppendString(p.TargetPath) + buf.AppendString(p.LinkPath) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *SymlinkPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + *p = SymlinkPacket{ + // Arguments were inadvertently reversed. + TargetPath: buf.ConsumeString(), + LinkPath: buf.ConsumeString(), + } + + return buf.Err +} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go new file mode 100644 index 000000000..0143ec0c9 --- /dev/null +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go @@ -0,0 +1,114 @@ +package sshfx + +// FileMode represents a file’s mode and permission bits. +// The bits are defined according to POSIX standards, +// and may not apply to the OS being built for. +type FileMode uint32 + +// Permission flags, defined here to avoid potential inconsistencies in individual OS implementations. +const ( + ModePerm FileMode = 0o0777 // S_IRWXU | S_IRWXG | S_IRWXO + ModeUserRead FileMode = 0o0400 // S_IRUSR + ModeUserWrite FileMode = 0o0200 // S_IWUSR + ModeUserExec FileMode = 0o0100 // S_IXUSR + ModeGroupRead FileMode = 0o0040 // S_IRGRP + ModeGroupWrite FileMode = 0o0020 // S_IWGRP + ModeGroupExec FileMode = 0o0010 // S_IXGRP + ModeOtherRead FileMode = 0o0004 // S_IROTH + ModeOtherWrite FileMode = 0o0002 // S_IWOTH + ModeOtherExec FileMode = 0o0001 // S_IXOTH + + ModeSetUID FileMode = 0o4000 // S_ISUID + ModeSetGID FileMode = 0o2000 // S_ISGID + ModeSticky FileMode = 0o1000 // S_ISVTX + + ModeType FileMode = 0xF000 // S_IFMT + ModeNamedPipe FileMode = 0x1000 // S_IFIFO + ModeCharDevice FileMode = 0x2000 // S_IFCHR + ModeDir FileMode = 0x4000 // S_IFDIR + ModeDevice FileMode = 0x6000 // S_IFBLK + ModeRegular FileMode = 0x8000 // S_IFREG + ModeSymlink FileMode = 0xA000 // S_IFLNK + ModeSocket FileMode = 0xC000 // S_IFSOCK +) + +// IsDir reports whether m describes a directory. +// That is, it tests for m.Type() == ModeDir. +func (m FileMode) IsDir() bool { + return (m & ModeType) == ModeDir +} + +// IsRegular reports whether m describes a regular file. 
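
Editor's note: the FileMode constants above follow the POSIX bit layout rather than Go's io/fs encoding, so type checks go through the ModeType mask. A tiny illustration (exampleMode is hypothetical):

package sshfx

import "fmt"

// exampleMode: a regular file with 0644 permissions, expressed with the
// POSIX-layout constants defined above.
func exampleMode() {
	m := ModeRegular | FileMode(0o644)
	fmt.Println(m.IsDir())                 // false
	fmt.Println(m&ModeType == ModeRegular) // true
	fmt.Println(m&ModePerm == 0o644)       // true: permission bits only
}
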
+// That is, it tests for m.Type() == ModeRegular
+func (m FileMode) IsRegular() bool {
+	return (m & ModeType) == ModeRegular
+}
+
+// Perm returns the POSIX permission bits in m (m & ModePerm).
+func (m FileMode) Perm() FileMode {
+	return (m & ModePerm)
+}
+
+// Type returns the type bits in m (m & ModeType).
+func (m FileMode) Type() FileMode {
+	return (m & ModeType)
+}
+
+// String returns a `-rwxrwxrwx` style string representing the `ls -l` POSIX permissions string.
+func (m FileMode) String() string {
+	var buf [10]byte
+
+	switch m.Type() {
+	case ModeRegular:
+		buf[0] = '-'
+	case ModeDir:
+		buf[0] = 'd'
+	case ModeSymlink:
+		buf[0] = 'l'
+	case ModeDevice:
+		buf[0] = 'b'
+	case ModeCharDevice:
+		buf[0] = 'c'
+	case ModeNamedPipe:
+		buf[0] = 'p'
+	case ModeSocket:
+		buf[0] = 's'
+	default:
+		buf[0] = '?'
+	}
+
+	const rwx = "rwxrwxrwx"
+	for i, c := range rwx {
+		if m&(1<<uint(9-1-i)) != 0 {
+			buf[i+1] = byte(c)
+		} else {
+			buf[i+1] = '-'
+		}
+	}
+
+	return string(buf[:])
+}
+
+func marshalUint32(b []byte, v uint32) []byte {
+	return append(b, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+func marshalUint64(b []byte, v uint64) []byte {
+	return marshalUint32(marshalUint32(b, uint32(v>>32)), uint32(v))
+}
+
+func marshalString(b []byte, v string) []byte {
+	return append(marshalUint32(b, uint32(len(v))), v...)
+}
+
+func marshalFileInfo(b []byte, fi os.FileInfo) []byte {
+	// attributes variable struct, and also variable per protocol version
+	// spec version 3 attributes:
+	// uint32   flags
+	// uint64   size           present only if flag SSH_FILEXFER_ATTR_SIZE
+	// uint32   uid            present only if flag SSH_FILEXFER_ATTR_UIDGID
+	// uint32   gid            present only if flag SSH_FILEXFER_ATTR_UIDGID
+	// uint32   permissions    present only if flag SSH_FILEXFER_ATTR_PERMISSIONS
+	// uint32   atime          present only if flag SSH_FILEXFER_ACMODTIME
+	// uint32   mtime          present only if flag SSH_FILEXFER_ACMODTIME
+	// uint32   extended_count present only if flag SSH_FILEXFER_ATTR_EXTENDED
+	// string   extended_type
+	// string   extended_data
+	// ...
more extended data (extended_type - extended_data pairs), + // so that number of pairs equals extended_count + + flags, fileStat := fileStatFromInfo(fi) + + b = marshalUint32(b, flags) + if flags&sshFileXferAttrSize != 0 { + b = marshalUint64(b, fileStat.Size) + } + if flags&sshFileXferAttrUIDGID != 0 { + b = marshalUint32(b, fileStat.UID) + b = marshalUint32(b, fileStat.GID) + } + if flags&sshFileXferAttrPermissions != 0 { + b = marshalUint32(b, fileStat.Mode) + } + if flags&sshFileXferAttrACmodTime != 0 { + b = marshalUint32(b, fileStat.Atime) + b = marshalUint32(b, fileStat.Mtime) + } + + if flags&sshFileXferAttrExtended != 0 { + b = marshalUint32(b, uint32(len(fileStat.Extended))) + + for _, attr := range fileStat.Extended { + b = marshalString(b, attr.ExtType) + b = marshalString(b, attr.ExtData) + } + } + + return b +} + +func marshalStatus(b []byte, err StatusError) []byte { + b = marshalUint32(b, err.Code) + b = marshalString(b, err.msg) + b = marshalString(b, err.lang) + return b +} + +func marshal(b []byte, v interface{}) []byte { + if v == nil { + return b + } + switch v := v.(type) { + case uint8: + return append(b, v) + case uint32: + return marshalUint32(b, v) + case uint64: + return marshalUint64(b, v) + case string: + return marshalString(b, v) + case os.FileInfo: + return marshalFileInfo(b, v) + default: + switch d := reflect.ValueOf(v); d.Kind() { + case reflect.Struct: + for i, n := 0, d.NumField(); i < n; i++ { + b = marshal(b, d.Field(i).Interface()) + } + return b + case reflect.Slice: + for i, n := 0, d.Len(); i < n; i++ { + b = marshal(b, d.Index(i).Interface()) + } + return b + default: + panic(fmt.Sprintf("marshal(%#v): cannot handle type %T", v, v)) + } + } +} + +func unmarshalUint32(b []byte) (uint32, []byte) { + v := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 + return v, b[4:] +} + +func unmarshalUint32Safe(b []byte) (uint32, []byte, error) { + var v uint32 + if len(b) < 4 { + return 0, nil, errShortPacket + } + v, b = unmarshalUint32(b) + return v, b, nil +} + +func unmarshalUint64(b []byte) (uint64, []byte) { + h, b := unmarshalUint32(b) + l, b := unmarshalUint32(b) + return uint64(h)<<32 | uint64(l), b +} + +func unmarshalUint64Safe(b []byte) (uint64, []byte, error) { + var v uint64 + if len(b) < 8 { + return 0, nil, errShortPacket + } + v, b = unmarshalUint64(b) + return v, b, nil +} + +func unmarshalString(b []byte) (string, []byte) { + n, b := unmarshalUint32(b) + return string(b[:n]), b[n:] +} + +func unmarshalStringSafe(b []byte) (string, []byte, error) { + n, b, err := unmarshalUint32Safe(b) + if err != nil { + return "", nil, err + } + if int64(n) > int64(len(b)) { + return "", nil, errShortPacket + } + return string(b[:n]), b[n:], nil +} + +func unmarshalAttrs(b []byte) (*FileStat, []byte) { + flags, b := unmarshalUint32(b) + return unmarshalFileStat(flags, b) +} + +func unmarshalFileStat(flags uint32, b []byte) (*FileStat, []byte) { + var fs FileStat + if flags&sshFileXferAttrSize == sshFileXferAttrSize { + fs.Size, b, _ = unmarshalUint64Safe(b) + } + if flags&sshFileXferAttrUIDGID == sshFileXferAttrUIDGID { + fs.UID, b, _ = unmarshalUint32Safe(b) + } + if flags&sshFileXferAttrUIDGID == sshFileXferAttrUIDGID { + fs.GID, b, _ = unmarshalUint32Safe(b) + } + if flags&sshFileXferAttrPermissions == sshFileXferAttrPermissions { + fs.Mode, b, _ = unmarshalUint32Safe(b) + } + if flags&sshFileXferAttrACmodTime == sshFileXferAttrACmodTime { + fs.Atime, b, _ = unmarshalUint32Safe(b) + fs.Mtime, b, _ = unmarshalUint32Safe(b) + } + if 
flags&sshFileXferAttrExtended == sshFileXferAttrExtended {
+		var count uint32
+		count, b, _ = unmarshalUint32Safe(b)
+		ext := make([]StatExtended, count)
+		for i := uint32(0); i < count; i++ {
+			var typ string
+			var data string
+			typ, b, _ = unmarshalStringSafe(b)
+			data, b, _ = unmarshalStringSafe(b)
+			ext[i] = StatExtended{
+				ExtType: typ,
+				ExtData: data,
+			}
+		}
+		fs.Extended = ext
+	}
+	return &fs, b
+}
+
+func unmarshalStatus(id uint32, data []byte) error {
+	sid, data := unmarshalUint32(data)
+	if sid != id {
+		return &unexpectedIDErr{id, sid}
+	}
+	code, data := unmarshalUint32(data)
+	msg, data, _ := unmarshalStringSafe(data)
+	lang, _, _ := unmarshalStringSafe(data)
+	return &StatusError{
+		Code: code,
+		msg:  msg,
+		lang: lang,
+	}
+}
+
+type packetMarshaler interface {
+	marshalPacket() (header, payload []byte, err error)
+}
+
+func marshalPacket(m encoding.BinaryMarshaler) (header, payload []byte, err error) {
+	if m, ok := m.(packetMarshaler); ok {
+		return m.marshalPacket()
+	}
+
+	header, err = m.MarshalBinary()
+	return
+}
+
+// sendPacket marshals p according to the uint32 length-prefixed packet format
+// of draft-ietf-secsh-filexfer-02 section 3.
+func sendPacket(w io.Writer, m encoding.BinaryMarshaler) error {
+	header, payload, err := marshalPacket(m)
+	if err != nil {
+		return fmt.Errorf("binary marshaller failed: %w", err)
+	}
+
+	length := len(header) + len(payload) - 4 // subtract the uint32(length) from the start
+	if debugDumpTxPacketBytes {
+		debug("send packet: %s %d bytes %x%x", fxp(header[4]), length, header[5:], payload)
+	} else if debugDumpTxPacket {
+		debug("send packet: %s %d bytes", fxp(header[4]), length)
+	}
+
+	binary.BigEndian.PutUint32(header[:4], uint32(length))
+
+	if _, err := w.Write(header); err != nil {
+		return fmt.Errorf("failed to send packet: %w", err)
+	}
+
+	if len(payload) > 0 {
+		if _, err := w.Write(payload); err != nil {
+			return fmt.Errorf("failed to send packet payload: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func recvPacket(r io.Reader, alloc *allocator, orderID uint32) (uint8, []byte, error) {
+	var b []byte
+	if alloc != nil {
+		b = alloc.GetPage(orderID)
+	} else {
+		b = make([]byte, 4)
+	}
+	if _, err := io.ReadFull(r, b[:4]); err != nil {
+		return 0, nil, err
+	}
+	length, _ := unmarshalUint32(b)
+	if length > maxMsgLength {
+		debug("recv packet %d bytes too long", length)
+		return 0, nil, errLongPacket
+	}
+	if length == 0 {
+		debug("recv packet of 0 bytes too short")
+		return 0, nil, errShortPacket
+	}
+	if alloc == nil {
+		b = make([]byte, length)
+	}
+	if _, err := io.ReadFull(r, b[:length]); err != nil {
+		// ReadFull only returns EOF if it has read no bytes.
+		// In this case, that means a partial packet, and thus unexpected.
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		debug("recv packet %d bytes: err %v", length, err)
+		return 0, nil, err
+	}
+	if debugDumpRxPacketBytes {
+		debug("recv packet: %s %d bytes %x", fxp(b[0]), length, b[1:length])
+	} else if debugDumpRxPacket {
+		debug("recv packet: %s %d bytes", fxp(b[0]), length)
+	}
+	return b[0], b[1:length], nil
+}
+
+type extensionPair struct {
+	Name string
+	Data string
+}
+
+func unmarshalExtensionPair(b []byte) (extensionPair, []byte, error) {
+	var ep extensionPair
+	var err error
+	ep.Name, b, err = unmarshalStringSafe(b)
+	if err != nil {
+		return ep, b, err
+	}
+	ep.Data, b, err = unmarshalStringSafe(b)
+	return ep, b, err
+}
+
+// Here starts the definition of packets along with their MarshalBinary
+// implementations.
+// Manually writing the marshalling logic saves us a lot of time and
+// allocations.
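
Editor's note: sendPacket's length arithmetic is the heart of the framing: the leading uint32 counts everything after itself, so with a header whose first four bytes are reserved, the value is len(header)+len(payload)-4. A standalone sketch of the same idea, assuming the legacy package these declarations belong to (frame is hypothetical; the real code path is sendPacket above):

package sftp

import "encoding/binary"

// frame stamps the length prefix into a header whose first four bytes were
// reserved up front, then joins header and payload into one wire-ready slice.
func frame(header, payload []byte) []byte {
	binary.BigEndian.PutUint32(header[:4], uint32(len(header)+len(payload)-4))
	return append(header, payload...)
}
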
+ +type sshFxInitPacket struct { + Version uint32 + Extensions []extensionPair +} + +func (p *sshFxInitPacket) MarshalBinary() ([]byte, error) { + l := 4 + 1 + 4 // uint32(length) + byte(type) + uint32(version) + for _, e := range p.Extensions { + l += 4 + len(e.Name) + 4 + len(e.Data) + } + + b := make([]byte, 4, l) + b = append(b, sshFxpInit) + b = marshalUint32(b, p.Version) + + for _, e := range p.Extensions { + b = marshalString(b, e.Name) + b = marshalString(b, e.Data) + } + + return b, nil +} + +func (p *sshFxInitPacket) UnmarshalBinary(b []byte) error { + var err error + if p.Version, b, err = unmarshalUint32Safe(b); err != nil { + return err + } + for len(b) > 0 { + var ep extensionPair + ep, b, err = unmarshalExtensionPair(b) + if err != nil { + return err + } + p.Extensions = append(p.Extensions, ep) + } + return nil +} + +type sshFxVersionPacket struct { + Version uint32 + Extensions []sshExtensionPair +} + +type sshExtensionPair struct { + Name, Data string +} + +func (p *sshFxVersionPacket) MarshalBinary() ([]byte, error) { + l := 4 + 1 + 4 // uint32(length) + byte(type) + uint32(version) + for _, e := range p.Extensions { + l += 4 + len(e.Name) + 4 + len(e.Data) + } + + b := make([]byte, 4, l) + b = append(b, sshFxpVersion) + b = marshalUint32(b, p.Version) + + for _, e := range p.Extensions { + b = marshalString(b, e.Name) + b = marshalString(b, e.Data) + } + + return b, nil +} + +func marshalIDStringPacket(packetType byte, id uint32, str string) ([]byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(str) + + b := make([]byte, 4, l) + b = append(b, packetType) + b = marshalUint32(b, id) + b = marshalString(b, str) + + return b, nil +} + +func unmarshalIDString(b []byte, id *uint32, str *string) error { + var err error + *id, b, err = unmarshalUint32Safe(b) + if err != nil { + return err + } + *str, _, err = unmarshalStringSafe(b) + return err +} + +type sshFxpReaddirPacket struct { + ID uint32 + Handle string +} + +func (p *sshFxpReaddirPacket) id() uint32 { return p.ID } + +func (p *sshFxpReaddirPacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpReaddir, p.ID, p.Handle) +} + +func (p *sshFxpReaddirPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Handle) +} + +type sshFxpOpendirPacket struct { + ID uint32 + Path string +} + +func (p *sshFxpOpendirPacket) id() uint32 { return p.ID } + +func (p *sshFxpOpendirPacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpOpendir, p.ID, p.Path) +} + +func (p *sshFxpOpendirPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpLstatPacket struct { + ID uint32 + Path string +} + +func (p *sshFxpLstatPacket) id() uint32 { return p.ID } + +func (p *sshFxpLstatPacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpLstat, p.ID, p.Path) +} + +func (p *sshFxpLstatPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpStatPacket struct { + ID uint32 + Path string +} + +func (p *sshFxpStatPacket) id() uint32 { return p.ID } + +func (p *sshFxpStatPacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpStat, p.ID, p.Path) +} + +func (p *sshFxpStatPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpFstatPacket struct { + ID uint32 + Handle string +} + +func (p *sshFxpFstatPacket) id() uint32 { return p.ID } + +func (p 
*sshFxpFstatPacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpFstat, p.ID, p.Handle) +} + +func (p *sshFxpFstatPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Handle) +} + +type sshFxpClosePacket struct { + ID uint32 + Handle string +} + +func (p *sshFxpClosePacket) id() uint32 { return p.ID } + +func (p *sshFxpClosePacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpClose, p.ID, p.Handle) +} + +func (p *sshFxpClosePacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Handle) +} + +type sshFxpRemovePacket struct { + ID uint32 + Filename string +} + +func (p *sshFxpRemovePacket) id() uint32 { return p.ID } + +func (p *sshFxpRemovePacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpRemove, p.ID, p.Filename) +} + +func (p *sshFxpRemovePacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Filename) +} + +type sshFxpRmdirPacket struct { + ID uint32 + Path string +} + +func (p *sshFxpRmdirPacket) id() uint32 { return p.ID } + +func (p *sshFxpRmdirPacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpRmdir, p.ID, p.Path) +} + +func (p *sshFxpRmdirPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpSymlinkPacket struct { + ID uint32 + + // The order of the arguments to the SSH_FXP_SYMLINK method was inadvertently reversed. + // Unfortunately, the reversal was not noticed until the server was widely deployed. + // Covered in Section 4.1 of https://github.com/openssh/openssh-portable/blob/master/PROTOCOL + + Targetpath string + Linkpath string +} + +func (p *sshFxpSymlinkPacket) id() uint32 { return p.ID } + +func (p *sshFxpSymlinkPacket) MarshalBinary() ([]byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(p.Targetpath) + + 4 + len(p.Linkpath) + + b := make([]byte, 4, l) + b = append(b, sshFxpSymlink) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Targetpath) + b = marshalString(b, p.Linkpath) + + return b, nil +} + +func (p *sshFxpSymlinkPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Targetpath, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Linkpath, _, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +type sshFxpHardlinkPacket struct { + ID uint32 + Oldpath string + Newpath string +} + +func (p *sshFxpHardlinkPacket) id() uint32 { return p.ID } + +func (p *sshFxpHardlinkPacket) MarshalBinary() ([]byte, error) { + const ext = "hardlink@openssh.com" + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(ext) + + 4 + len(p.Oldpath) + + 4 + len(p.Newpath) + + b := make([]byte, 4, l) + b = append(b, sshFxpExtended) + b = marshalUint32(b, p.ID) + b = marshalString(b, ext) + b = marshalString(b, p.Oldpath) + b = marshalString(b, p.Newpath) + + return b, nil +} + +type sshFxpReadlinkPacket struct { + ID uint32 + Path string +} + +func (p *sshFxpReadlinkPacket) id() uint32 { return p.ID } + +func (p *sshFxpReadlinkPacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpReadlink, p.ID, p.Path) +} + +func (p *sshFxpReadlinkPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpRealpathPacket struct { + ID uint32 + Path string +} + +func (p *sshFxpRealpathPacket) id() uint32 { 
return p.ID } + +func (p *sshFxpRealpathPacket) MarshalBinary() ([]byte, error) { + return marshalIDStringPacket(sshFxpRealpath, p.ID, p.Path) +} + +func (p *sshFxpRealpathPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpNameAttr struct { + Name string + LongName string + Attrs []interface{} +} + +func (p *sshFxpNameAttr) MarshalBinary() ([]byte, error) { + var b []byte + b = marshalString(b, p.Name) + b = marshalString(b, p.LongName) + for _, attr := range p.Attrs { + b = marshal(b, attr) + } + return b, nil +} + +type sshFxpNamePacket struct { + ID uint32 + NameAttrs []*sshFxpNameAttr +} + +func (p *sshFxpNamePacket) marshalPacket() ([]byte, []byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + + b := make([]byte, 4, l) + b = append(b, sshFxpName) + b = marshalUint32(b, p.ID) + b = marshalUint32(b, uint32(len(p.NameAttrs))) + + var payload []byte + for _, na := range p.NameAttrs { + ab, err := na.MarshalBinary() + if err != nil { + return nil, nil, err + } + + payload = append(payload, ab...) + } + + return b, payload, nil +} + +func (p *sshFxpNamePacket) MarshalBinary() ([]byte, error) { + header, payload, err := p.marshalPacket() + return append(header, payload...), err +} + +type sshFxpOpenPacket struct { + ID uint32 + Path string + Pflags uint32 + Flags uint32 // ignored +} + +func (p *sshFxpOpenPacket) id() uint32 { return p.ID } + +func (p *sshFxpOpenPacket) MarshalBinary() ([]byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(p.Path) + + 4 + 4 + + b := make([]byte, 4, l) + b = append(b, sshFxpOpen) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Path) + b = marshalUint32(b, p.Pflags) + b = marshalUint32(b, p.Flags) + + return b, nil +} + +func (p *sshFxpOpenPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Pflags, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Flags, _, err = unmarshalUint32Safe(b); err != nil { + return err + } + return nil +} + +type sshFxpReadPacket struct { + ID uint32 + Len uint32 + Offset uint64 + Handle string +} + +func (p *sshFxpReadPacket) id() uint32 { return p.ID } + +func (p *sshFxpReadPacket) MarshalBinary() ([]byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(p.Handle) + + 8 + 4 // uint64 + uint32 + + b := make([]byte, 4, l) + b = append(b, sshFxpRead) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Handle) + b = marshalUint64(b, p.Offset) + b = marshalUint32(b, p.Len) + + return b, nil +} + +func (p *sshFxpReadPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil { + return err + } else if p.Len, _, err = unmarshalUint32Safe(b); err != nil { + return err + } + return nil +} + +// We need allocate bigger slices with extra capacity to avoid a re-allocation in sshFxpDataPacket.MarshalBinary +// So, we need: uint32(length) + byte(type) + uint32(id) + uint32(data_length) +const dataHeaderLen = 4 + 1 + 4 + 4 + +func (p *sshFxpReadPacket) getDataSlice(alloc *allocator, orderID uint32) []byte { + dataLen := p.Len + if dataLen > maxTxPacket { + dataLen = maxTxPacket + } + + 
+	if alloc != nil {
+		// GetPage returns a slice with capacity = maxMsgLength; this is enough to avoid new allocations in
+		// sshFxpDataPacket.MarshalBinary
+		return alloc.GetPage(orderID)[:dataLen]
+	}
+
+	// allocate with extra space for the header
+	return make([]byte, dataLen, dataLen+dataHeaderLen)
+}
+
+type sshFxpRenamePacket struct {
+	ID      uint32
+	Oldpath string
+	Newpath string
+}
+
+func (p *sshFxpRenamePacket) id() uint32 { return p.ID }
+
+func (p *sshFxpRenamePacket) MarshalBinary() ([]byte, error) {
+	l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+		4 + len(p.Oldpath) +
+		4 + len(p.Newpath)
+
+	b := make([]byte, 4, l)
+	b = append(b, sshFxpRename)
+	b = marshalUint32(b, p.ID)
+	b = marshalString(b, p.Oldpath)
+	b = marshalString(b, p.Newpath)
+
+	return b, nil
+}
+
+func (p *sshFxpRenamePacket) UnmarshalBinary(b []byte) error {
+	var err error
+	if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+		return err
+	} else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil {
+		return err
+	} else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+type sshFxpPosixRenamePacket struct {
+	ID      uint32
+	Oldpath string
+	Newpath string
+}
+
+func (p *sshFxpPosixRenamePacket) id() uint32 { return p.ID }
+
+func (p *sshFxpPosixRenamePacket) MarshalBinary() ([]byte, error) {
+	const ext = "posix-rename@openssh.com"
+	l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+		4 + len(ext) +
+		4 + len(p.Oldpath) +
+		4 + len(p.Newpath)
+
+	b := make([]byte, 4, l)
+	b = append(b, sshFxpExtended)
+	b = marshalUint32(b, p.ID)
+	b = marshalString(b, ext)
+	b = marshalString(b, p.Oldpath)
+	b = marshalString(b, p.Newpath)
+
+	return b, nil
+}
+
+type sshFxpWritePacket struct {
+	ID     uint32
+	Length uint32
+	Offset uint64
+	Handle string
+	Data   []byte
+}
+
+func (p *sshFxpWritePacket) id() uint32 { return p.ID }
+
+func (p *sshFxpWritePacket) marshalPacket() ([]byte, []byte, error) {
+	l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+		4 + len(p.Handle) +
+		8 + // uint64
+		4
+
+	b := make([]byte, 4, l)
+	b = append(b, sshFxpWrite)
+	b = marshalUint32(b, p.ID)
+	b = marshalString(b, p.Handle)
+	b = marshalUint64(b, p.Offset)
+	b = marshalUint32(b, p.Length)
+
+	return b, p.Data, nil
+}
+
+func (p *sshFxpWritePacket) MarshalBinary() ([]byte, error) {
+	header, payload, err := p.marshalPacket()
+	return append(header, payload...), err
+}
+
+func (p *sshFxpWritePacket) UnmarshalBinary(b []byte) error {
+	var err error
+	if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+		return err
+	} else if p.Handle, b, err = unmarshalStringSafe(b); err != nil {
+		return err
+	} else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil {
+		return err
+	} else if p.Length, b, err = unmarshalUint32Safe(b); err != nil {
+		return err
+	} else if uint32(len(b)) < p.Length {
+		return errShortPacket
+	}
+
+	p.Data = b[:p.Length]
+	return nil
+}
+
+type sshFxpMkdirPacket struct {
+	ID    uint32
+	Flags uint32 // ignored
+	Path  string
+}
+
+func (p *sshFxpMkdirPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpMkdirPacket) MarshalBinary() ([]byte, error) {
+	l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+		4 + len(p.Path) +
+		4 // uint32
+
+	b := make([]byte, 4, l)
+	b = append(b, sshFxpMkdir)
+	b = marshalUint32(b, p.ID)
+	b = marshalString(b, p.Path)
+	b = marshalUint32(b, p.Flags)
+
+	return b, nil
+}
+
+func (p *sshFxpMkdirPacket) UnmarshalBinary(b []byte) error {
+	var err error
+	if p.ID, b, err =
unmarshalUint32Safe(b); err != nil { + return err + } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Flags, _, err = unmarshalUint32Safe(b); err != nil { + return err + } + return nil +} + +type sshFxpSetstatPacket struct { + ID uint32 + Flags uint32 + Path string + Attrs interface{} +} + +type sshFxpFsetstatPacket struct { + ID uint32 + Flags uint32 + Handle string + Attrs interface{} +} + +func (p *sshFxpSetstatPacket) id() uint32 { return p.ID } +func (p *sshFxpFsetstatPacket) id() uint32 { return p.ID } + +func (p *sshFxpSetstatPacket) marshalPacket() ([]byte, []byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(p.Path) + + 4 // uint32 + + b := make([]byte, 4, l) + b = append(b, sshFxpSetstat) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Path) + b = marshalUint32(b, p.Flags) + + payload := marshal(nil, p.Attrs) + + return b, payload, nil +} + +func (p *sshFxpSetstatPacket) MarshalBinary() ([]byte, error) { + header, payload, err := p.marshalPacket() + return append(header, payload...), err +} + +func (p *sshFxpFsetstatPacket) marshalPacket() ([]byte, []byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(p.Handle) + + 4 // uint32 + + b := make([]byte, 4, l) + b = append(b, sshFxpFsetstat) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Handle) + b = marshalUint32(b, p.Flags) + + payload := marshal(nil, p.Attrs) + + return b, payload, nil +} + +func (p *sshFxpFsetstatPacket) MarshalBinary() ([]byte, error) { + header, payload, err := p.marshalPacket() + return append(header, payload...), err +} + +func (p *sshFxpSetstatPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { + return err + } + p.Attrs = b + return nil +} + +func (p *sshFxpFsetstatPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { + return err + } + p.Attrs = b + return nil +} + +type sshFxpHandlePacket struct { + ID uint32 + Handle string +} + +func (p *sshFxpHandlePacket) MarshalBinary() ([]byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(p.Handle) + + b := make([]byte, 4, l) + b = append(b, sshFxpHandle) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Handle) + + return b, nil +} + +type sshFxpStatusPacket struct { + ID uint32 + StatusError +} + +func (p *sshFxpStatusPacket) MarshalBinary() ([]byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + + 4 + len(p.StatusError.msg) + + 4 + len(p.StatusError.lang) + + b := make([]byte, 4, l) + b = append(b, sshFxpStatus) + b = marshalUint32(b, p.ID) + b = marshalStatus(b, p.StatusError) + + return b, nil +} + +type sshFxpDataPacket struct { + ID uint32 + Length uint32 + Data []byte +} + +func (p *sshFxpDataPacket) marshalPacket() ([]byte, []byte, error) { + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + + b := make([]byte, 4, l) + b = append(b, sshFxpData) + b = marshalUint32(b, p.ID) + b = marshalUint32(b, p.Length) + + return b, p.Data, nil +} + +// MarshalBinary encodes the receiver into a binary form and returns the 
result.
+// To avoid a new allocation the Data slice must have a capacity >= Length + dataHeaderLen
+//
+// This is hand-coded rather than just append(header, payload...),
+// in order to try and reuse the p.Data backing store in the packet.
+func (p *sshFxpDataPacket) MarshalBinary() ([]byte, error) {
+	b := append(p.Data, make([]byte, dataHeaderLen)...)
+	copy(b[dataHeaderLen:], p.Data[:p.Length])
+	// b[0:4] will be overwritten with the length in sendPacket
+	b[4] = sshFxpData
+	binary.BigEndian.PutUint32(b[5:9], p.ID)
+	binary.BigEndian.PutUint32(b[9:13], p.Length)
+	return b, nil
+}
+
+func (p *sshFxpDataPacket) UnmarshalBinary(b []byte) error {
+	var err error
+	if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+		return err
+	} else if p.Length, b, err = unmarshalUint32Safe(b); err != nil {
+		return err
+	} else if uint32(len(b)) < p.Length {
+		return errShortPacket
+	}
+
+	p.Data = b[:p.Length]
+	return nil
+}
+
+type sshFxpStatvfsPacket struct {
+	ID   uint32
+	Path string
+}
+
+func (p *sshFxpStatvfsPacket) id() uint32 { return p.ID }
+
+func (p *sshFxpStatvfsPacket) MarshalBinary() ([]byte, error) {
+	const ext = "statvfs@openssh.com"
+	l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id)
+		4 + len(ext) +
+		4 + len(p.Path)
+
+	b := make([]byte, 4, l)
+	b = append(b, sshFxpExtended)
+	b = marshalUint32(b, p.ID)
+	b = marshalString(b, ext)
+	b = marshalString(b, p.Path)
+
+	return b, nil
+}
+
+// A StatVFS contains statistics about a filesystem.
+type StatVFS struct {
+	ID      uint32
+	Bsize   uint64 /* file system block size */
+	Frsize  uint64 /* fundamental fs block size */
+	Blocks  uint64 /* number of blocks (unit f_frsize) */
+	Bfree   uint64 /* free blocks in file system */
+	Bavail  uint64 /* free blocks for non-root */
+	Files   uint64 /* total file inodes */
+	Ffree   uint64 /* free file inodes */
+	Favail  uint64 /* free file inodes for non-root */
+	Fsid    uint64 /* file system id */
+	Flag    uint64 /* bit mask of f_flag values */
+	Namemax uint64 /* maximum filename length */
+}
+
+// TotalSpace calculates the amount of total space in a filesystem.
+func (p *StatVFS) TotalSpace() uint64 {
+	return p.Frsize * p.Blocks
+}
+
+// FreeSpace calculates the amount of free space in a filesystem.
+func (p *StatVFS) FreeSpace() uint64 {
+	return p.Frsize * p.Bfree
+}
+
+// marshalPacket converts to ssh_FXP_EXTENDED_REPLY packet binary format
+func (p *StatVFS) marshalPacket() ([]byte, []byte, error) {
+	header := []byte{0, 0, 0, 0, sshFxpExtendedReply}
+
+	var buf bytes.Buffer
+	err := binary.Write(&buf, binary.BigEndian, p)
+
+	return header, buf.Bytes(), err
+}
+
+// MarshalBinary encodes the StatVFS as an SSH_FXP_EXTENDED_REPLY packet.
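+// Note: binary.Write serializes every exported field of StatVFS in order,
+// including the leading uint32 ID, so the reply payload is a fixed
+// 4 + 11*8 = 92 bytes following the five header bytes assembled in
+// marshalPacket.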
+func (p *StatVFS) MarshalBinary() ([]byte, error) { + header, payload, err := p.marshalPacket() + return append(header, payload...), err +} + +type sshFxpFsyncPacket struct { + ID uint32 + Handle string +} + +func (p *sshFxpFsyncPacket) id() uint32 { return p.ID } + +func (p *sshFxpFsyncPacket) MarshalBinary() ([]byte, error) { + const ext = "fsync@openssh.com" + l := 4 + 1 + 4 + // uint32(length) + byte(type) + uint32(id) + 4 + len(ext) + + 4 + len(p.Handle) + + b := make([]byte, 4, l) + b = append(b, sshFxpExtended) + b = marshalUint32(b, p.ID) + b = marshalString(b, ext) + b = marshalString(b, p.Handle) + + return b, nil +} + +type sshFxpExtendedPacket struct { + ID uint32 + ExtendedRequest string + SpecificPacket interface { + serverRespondablePacket + readonly() bool + } +} + +func (p *sshFxpExtendedPacket) id() uint32 { return p.ID } +func (p *sshFxpExtendedPacket) readonly() bool { + if p.SpecificPacket == nil { + return true + } + return p.SpecificPacket.readonly() +} + +func (p *sshFxpExtendedPacket) respond(svr *Server) responsePacket { + if p.SpecificPacket == nil { + return statusFromError(p.ID, nil) + } + return p.SpecificPacket.respond(svr) +} + +func (p *sshFxpExtendedPacket) UnmarshalBinary(b []byte) error { + var err error + bOrig := b + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.ExtendedRequest, _, err = unmarshalStringSafe(b); err != nil { + return err + } + + // specific unmarshalling + switch p.ExtendedRequest { + case "statvfs@openssh.com": + p.SpecificPacket = &sshFxpExtendedPacketStatVFS{} + case "posix-rename@openssh.com": + p.SpecificPacket = &sshFxpExtendedPacketPosixRename{} + case "hardlink@openssh.com": + p.SpecificPacket = &sshFxpExtendedPacketHardlink{} + default: + return fmt.Errorf("packet type %v: %w", p.SpecificPacket, errUnknownExtendedPacket) + } + + return p.SpecificPacket.UnmarshalBinary(bOrig) +} + +type sshFxpExtendedPacketStatVFS struct { + ID uint32 + ExtendedRequest string + Path string +} + +func (p *sshFxpExtendedPacketStatVFS) id() uint32 { return p.ID } +func (p *sshFxpExtendedPacketStatVFS) readonly() bool { return true } +func (p *sshFxpExtendedPacketStatVFS) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Path, _, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +type sshFxpExtendedPacketPosixRename struct { + ID uint32 + ExtendedRequest string + Oldpath string + Newpath string +} + +func (p *sshFxpExtendedPacketPosixRename) id() uint32 { return p.ID } +func (p *sshFxpExtendedPacketPosixRename) readonly() bool { return false } +func (p *sshFxpExtendedPacketPosixRename) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +func (p *sshFxpExtendedPacketPosixRename) respond(s *Server) responsePacket { + err := os.Rename(s.toLocalPath(p.Oldpath), s.toLocalPath(p.Newpath)) + return statusFromError(p.ID, err) +} + +type sshFxpExtendedPacketHardlink struct { + ID uint32 + ExtendedRequest string + Oldpath string + Newpath string +} + +// 
https://github.com/openssh/openssh-portable/blob/master/PROTOCOL
+func (p *sshFxpExtendedPacketHardlink) id() uint32 { return p.ID }
+func (p *sshFxpExtendedPacketHardlink) readonly() bool { return true }
+func (p *sshFxpExtendedPacketHardlink) UnmarshalBinary(b []byte) error {
+	var err error
+	if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+		return err
+	} else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil {
+		return err
+	} else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil {
+		return err
+	} else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (p *sshFxpExtendedPacketHardlink) respond(s *Server) responsePacket {
+	err := os.Link(s.toLocalPath(p.Oldpath), s.toLocalPath(p.Newpath))
+	return statusFromError(p.ID, err)
+}
diff --git a/vendor/github.com/pkg/sftp/pool.go b/vendor/github.com/pkg/sftp/pool.go
new file mode 100644
index 000000000..361262906
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/pool.go
@@ -0,0 +1,79 @@
+package sftp
+
+// bufPool provides a pool of byte-slices to be reused in various parts of the package.
+// It is safe to use concurrently through a pointer.
+type bufPool struct {
+	ch   chan []byte
+	blen int
+}
+
+func newBufPool(depth, bufLen int) *bufPool {
+	return &bufPool{
+		ch:   make(chan []byte, depth),
+		blen: bufLen,
+	}
+}
+
+func (p *bufPool) Get() []byte {
+	if p.blen <= 0 {
+		panic("bufPool: new buffer creation length must be greater than zero")
+	}
+
+	for {
+		select {
+		case b := <-p.ch:
+			if cap(b) < p.blen {
+				// just in case: throw away any buffer with insufficient capacity.
+				continue
+			}
+
+			return b[:p.blen]
+
+		default:
+			return make([]byte, p.blen)
+		}
+	}
+}
+
+func (p *bufPool) Put(b []byte) {
+	if p == nil {
+		// functional default: no reuse.
+		return
+	}
+
+	if cap(b) < p.blen || cap(b) > p.blen*2 {
+		// DO NOT reuse buffers with insufficient capacity.
+		// This could cause panics when resizing to p.blen.
+
+		// DO NOT reuse buffers with excessive capacity.
+		// This could cause memory leaks.
+		return
+	}
+
+	select {
+	case p.ch <- b:
+	default:
+	}
+}
+
+type resChanPool chan chan result
+
+func newResChanPool(depth int) resChanPool {
+	return make(chan chan result, depth)
+}
+
+func (p resChanPool) Get() chan result {
+	select {
+	case ch := <-p:
+		return ch
+	default:
+		return make(chan result, 1)
+	}
+}
+
+func (p resChanPool) Put(ch chan result) {
+	select {
+	case p <- ch:
+	default:
+	}
+}
diff --git a/vendor/github.com/pkg/sftp/release.go b/vendor/github.com/pkg/sftp/release.go
new file mode 100644
index 000000000..9ecedc441
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/release.go
@@ -0,0 +1,6 @@
+//go:build !debug
+// +build !debug
+
+package sftp
+
+func debug(fmt string, args ...interface{}) {}
diff --git a/vendor/github.com/pkg/sftp/request-attrs.go b/vendor/github.com/pkg/sftp/request-attrs.go
new file mode 100644
index 000000000..b5c95b4ad
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request-attrs.go
@@ -0,0 +1,63 @@
+package sftp
+
+// Methods on the Request object to make working with the Flags bitmasks and
+// Attr(ibutes) byte blob easier. Use Pflags() when working with an Open/Write
+// request and AttrFlags() and Attributes() when working with SetStat requests.
+import "os"
+
+// FileOpenFlags defines Open and Write Flags. Correlate directly with os.OpenFile flags
+// (https://golang.org/pkg/os/#pkg-constants).
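+//
+// As a sketch (not part of this package's API), a handler backed by the real
+// filesystem could translate them back into os.OpenFile flags roughly like:
+//
+//	flags := r.Pflags()
+//	mode := os.O_RDONLY
+//	if flags.Read && flags.Write {
+//		mode = os.O_RDWR
+//	} else if flags.Write {
+//		mode = os.O_WRONLY
+//	}
+//	if flags.Creat {
+//		mode |= os.O_CREATE
+//	}
+//	if flags.Trunc {
+//		mode |= os.O_TRUNC
+//	}
+//	if flags.Excl {
+//		mode |= os.O_EXCL
+//	}
+//
+// (Append is deliberately left out; see the FileWriter notes about O_APPEND
+// conflicting with WriteAt.)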
+type FileOpenFlags struct {
+	Read, Write, Append, Creat, Trunc, Excl bool
+}
+
+func newFileOpenFlags(flags uint32) FileOpenFlags {
+	return FileOpenFlags{
+		Read:   flags&sshFxfRead != 0,
+		Write:  flags&sshFxfWrite != 0,
+		Append: flags&sshFxfAppend != 0,
+		Creat:  flags&sshFxfCreat != 0,
+		Trunc:  flags&sshFxfTrunc != 0,
+		Excl:   flags&sshFxfExcl != 0,
+	}
+}
+
+// Pflags converts the bitmap/uint32 from SFTP Open packet pflag values
+// into a FileOpenFlags struct with booleans set for flags set in the bitmap.
+func (r *Request) Pflags() FileOpenFlags {
+	return newFileOpenFlags(r.Flags)
+}
+
+// FileAttrFlags indicates whether SFTP file attributes were passed. When a flag is
+// true the corresponding attribute should be available from the FileStat
+// object returned by the Attributes method. Used with SetStat.
+type FileAttrFlags struct {
+	Size, UidGid, Permissions, Acmodtime bool
+}
+
+func newFileAttrFlags(flags uint32) FileAttrFlags {
+	return FileAttrFlags{
+		Size:        (flags & sshFileXferAttrSize) != 0,
+		UidGid:      (flags & sshFileXferAttrUIDGID) != 0,
+		Permissions: (flags & sshFileXferAttrPermissions) != 0,
+		Acmodtime:   (flags & sshFileXferAttrACmodTime) != 0,
+	}
+}
+
+// AttrFlags returns a FileAttrFlags boolean struct based on the
+// bitmap/uint32 file attribute flags from the SFTP packet.
+func (r *Request) AttrFlags() FileAttrFlags {
+	return newFileAttrFlags(r.Flags)
+}
+
+// FileMode returns the Mode SFTP file attributes wrapped as os.FileMode
+func (a FileStat) FileMode() os.FileMode {
+	return os.FileMode(a.Mode)
+}
+
+// Attributes parses the file attributes byte blob and returns them in a
+// FileStat object.
+func (r *Request) Attributes() *FileStat {
+	fs, _ := unmarshalFileStat(r.Flags, r.Attrs)
+	return fs
+}
diff --git a/vendor/github.com/pkg/sftp/request-errors.go b/vendor/github.com/pkg/sftp/request-errors.go
new file mode 100644
index 000000000..6505b5c74
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request-errors.go
@@ -0,0 +1,54 @@
+package sftp
+
+type fxerr uint32
+
+// Error types that match the SFTP's SSH_FXP_STATUS codes. Gives you more
+// direct control of the errors being sent vs. letting the library work them
+// out from the standard os/io errors.
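+//
+// For example (a sketch; myFS is a hypothetical backend), a read-only handler
+// can reject modifications with an explicit status code:
+//
+//	func (fs *myFS) Filecmd(r *sftp.Request) error {
+//		// mapped directly onto an SSH_FXP_STATUS reply by the server
+//		return sftp.ErrSSHFxPermissionDenied
+//	}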
+const (
+	ErrSSHFxOk               = fxerr(sshFxOk)
+	ErrSSHFxEOF              = fxerr(sshFxEOF)
+	ErrSSHFxNoSuchFile       = fxerr(sshFxNoSuchFile)
+	ErrSSHFxPermissionDenied = fxerr(sshFxPermissionDenied)
+	ErrSSHFxFailure          = fxerr(sshFxFailure)
+	ErrSSHFxBadMessage       = fxerr(sshFxBadMessage)
+	ErrSSHFxNoConnection     = fxerr(sshFxNoConnection)
+	ErrSSHFxConnectionLost   = fxerr(sshFxConnectionLost)
+	ErrSSHFxOpUnsupported    = fxerr(sshFxOPUnsupported)
+)
+
+// Deprecated error types; these are aliases for the new ones, please use the new ones directly
+const (
+	ErrSshFxOk               = ErrSSHFxOk
+	ErrSshFxEof              = ErrSSHFxEOF
+	ErrSshFxNoSuchFile       = ErrSSHFxNoSuchFile
+	ErrSshFxPermissionDenied = ErrSSHFxPermissionDenied
+	ErrSshFxFailure          = ErrSSHFxFailure
+	ErrSshFxBadMessage       = ErrSSHFxBadMessage
+	ErrSshFxNoConnection     = ErrSSHFxNoConnection
+	ErrSshFxConnectionLost   = ErrSSHFxConnectionLost
+	ErrSshFxOpUnsupported    = ErrSSHFxOpUnsupported
+)
+
+func (e fxerr) Error() string {
+	switch e {
+	case ErrSSHFxOk:
+		return "OK"
+	case ErrSSHFxEOF:
+		return "EOF"
+	case ErrSSHFxNoSuchFile:
+		return "no such file"
+	case ErrSSHFxPermissionDenied:
+		return "permission denied"
+	case ErrSSHFxBadMessage:
+		return "bad message"
+	case ErrSSHFxNoConnection:
+		return "no connection"
+	case ErrSSHFxConnectionLost:
+		return "connection lost"
+	case ErrSSHFxOpUnsupported:
+		return "operation unsupported"
+	default:
+		return "failure"
+	}
+}
diff --git a/vendor/github.com/pkg/sftp/request-example.go b/vendor/github.com/pkg/sftp/request-example.go
new file mode 100644
index 000000000..519b3b768
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request-example.go
@@ -0,0 +1,647 @@
+package sftp
+
+// This serves as an example of how to implement the request server handler as
+// well as a dummy backend for testing. It implements an in-memory backend that
+// works as a very simple filesystem with a simple flat key-value lookup system.
+
+import (
+	"errors"
+	"io"
+	"os"
+	"path"
+	"sort"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+)
+
+const maxSymlinkFollows = 5
+
+var errTooManySymlinks = errors.New("too many symbolic links")
+
+// InMemHandler returns a Handlers object with the test handlers.
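+//
+// Typical wiring (a sketch; ch is assumed to be an established SSH "sftp"
+// subsystem channel, or any io.ReadWriteCloser):
+//
+//	srv := sftp.NewRequestServer(ch, sftp.InMemHandler())
+//	if err := srv.Serve(); err == io.EOF {
+//		srv.Close()
+//	}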
+func InMemHandler() Handlers { + root := &root{ + rootFile: &memFile{name: "/", modtime: time.Now(), isdir: true}, + files: make(map[string]*memFile), + } + return Handlers{root, root, root, root} +} + +// Example Handlers +func (fs *root) Fileread(r *Request) (io.ReaderAt, error) { + flags := r.Pflags() + if !flags.Read { + // sanity check + return nil, os.ErrInvalid + } + + return fs.OpenFile(r) +} + +func (fs *root) Filewrite(r *Request) (io.WriterAt, error) { + flags := r.Pflags() + if !flags.Write { + // sanity check + return nil, os.ErrInvalid + } + + return fs.OpenFile(r) +} + +func (fs *root) OpenFile(r *Request) (WriterAtReaderAt, error) { + if fs.mockErr != nil { + return nil, fs.mockErr + } + _ = r.WithContext(r.Context()) // initialize context for deadlock testing + + fs.mu.Lock() + defer fs.mu.Unlock() + + return fs.openfile(r.Filepath, r.Flags) +} + +func (fs *root) putfile(pathname string, file *memFile) error { + pathname, err := fs.canonName(pathname) + if err != nil { + return err + } + + if !strings.HasPrefix(pathname, "/") { + return os.ErrInvalid + } + + if _, err := fs.lfetch(pathname); err != os.ErrNotExist { + return os.ErrExist + } + + file.name = pathname + fs.files[pathname] = file + + return nil +} + +func (fs *root) openfile(pathname string, flags uint32) (*memFile, error) { + pflags := newFileOpenFlags(flags) + + file, err := fs.fetch(pathname) + if err == os.ErrNotExist { + if !pflags.Creat { + return nil, os.ErrNotExist + } + + var count int + // You can create files through dangling symlinks. + link, err := fs.lfetch(pathname) + for err == nil && link.symlink != "" { + if pflags.Excl { + // unless you also passed in O_EXCL + return nil, os.ErrInvalid + } + + if count++; count > maxSymlinkFollows { + return nil, errTooManySymlinks + } + + pathname = link.symlink + link, err = fs.lfetch(pathname) + } + + file := &memFile{ + modtime: time.Now(), + } + + if err := fs.putfile(pathname, file); err != nil { + return nil, err + } + + return file, nil + } + + if err != nil { + return nil, err + } + + if pflags.Creat && pflags.Excl { + return nil, os.ErrExist + } + + if file.IsDir() { + return nil, os.ErrInvalid + } + + if pflags.Trunc { + if err := file.Truncate(0); err != nil { + return nil, err + } + } + + return file, nil +} + +func (fs *root) Filecmd(r *Request) error { + if fs.mockErr != nil { + return fs.mockErr + } + _ = r.WithContext(r.Context()) // initialize context for deadlock testing + + fs.mu.Lock() + defer fs.mu.Unlock() + + switch r.Method { + case "Setstat": + file, err := fs.openfile(r.Filepath, sshFxfWrite) + if err != nil { + return err + } + + if r.AttrFlags().Size { + return file.Truncate(int64(r.Attributes().Size)) + } + + return nil + + case "Rename": + // SFTP-v2: "It is an error if there already exists a file with the name specified by newpath." + // This varies from the POSIX specification, which allows limited replacement of target files. + if fs.exists(r.Target) { + return os.ErrExist + } + + return fs.rename(r.Filepath, r.Target) + + case "Rmdir": + return fs.rmdir(r.Filepath) + + case "Remove": + // IEEE 1003.1 remove explicitly can unlink files and remove empty directories. + // We use instead here the semantics of unlink, which is allowed to be restricted against directories. + return fs.unlink(r.Filepath) + + case "Mkdir": + return fs.mkdir(r.Filepath) + + case "Link": + return fs.link(r.Filepath, r.Target) + + case "Symlink": + // NOTE: r.Filepath is the target, and r.Target is the linkpath. 
+ return fs.symlink(r.Filepath, r.Target) + } + + return errors.New("unsupported") +} + +func (fs *root) rename(oldpath, newpath string) error { + file, err := fs.lfetch(oldpath) + if err != nil { + return err + } + + newpath, err = fs.canonName(newpath) + if err != nil { + return err + } + + if !strings.HasPrefix(newpath, "/") { + return os.ErrInvalid + } + + target, err := fs.lfetch(newpath) + if err != os.ErrNotExist { + if target == file { + // IEEE 1003.1: if oldpath and newpath are the same directory entry, + // then return no error, and perform no further action. + return nil + } + + switch { + case file.IsDir(): + // IEEE 1003.1: if oldpath is a directory, and newpath exists, + // then newpath must be a directory, and empty. + // It is to be removed prior to rename. + if err := fs.rmdir(newpath); err != nil { + return err + } + + case target.IsDir(): + // IEEE 1003.1: if oldpath is not a directory, and newpath exists, + // then newpath may not be a directory. + return syscall.EISDIR + } + } + + fs.files[newpath] = file + + if file.IsDir() { + dirprefix := file.name + "/" + + for name, file := range fs.files { + if strings.HasPrefix(name, dirprefix) { + newname := path.Join(newpath, strings.TrimPrefix(name, dirprefix)) + + fs.files[newname] = file + file.name = newname + delete(fs.files, name) + } + } + } + + file.name = newpath + delete(fs.files, oldpath) + + return nil +} + +func (fs *root) PosixRename(r *Request) error { + if fs.mockErr != nil { + return fs.mockErr + } + _ = r.WithContext(r.Context()) // initialize context for deadlock testing + + fs.mu.Lock() + defer fs.mu.Unlock() + + return fs.rename(r.Filepath, r.Target) +} + +func (fs *root) StatVFS(r *Request) (*StatVFS, error) { + if fs.mockErr != nil { + return nil, fs.mockErr + } + + return getStatVFSForPath(r.Filepath) +} + +func (fs *root) mkdir(pathname string) error { + dir := &memFile{ + modtime: time.Now(), + isdir: true, + } + + return fs.putfile(pathname, dir) +} + +func (fs *root) rmdir(pathname string) error { + // IEEE 1003.1: If pathname is a symlink, then rmdir should fail with ENOTDIR. + dir, err := fs.lfetch(pathname) + if err != nil { + return err + } + + if !dir.IsDir() { + return syscall.ENOTDIR + } + + // use the dir‘s internal name not the pathname we passed in. + // the dir.name is always the canonical name of a directory. + pathname = dir.name + + for name := range fs.files { + if path.Dir(name) == pathname { + return errors.New("directory not empty") + } + } + + delete(fs.files, pathname) + + return nil +} + +func (fs *root) link(oldpath, newpath string) error { + file, err := fs.lfetch(oldpath) + if err != nil { + return err + } + + if file.IsDir() { + return errors.New("hard link not allowed for directory") + } + + return fs.putfile(newpath, file) +} + +// symlink() creates a symbolic link named `linkpath` which contains the string `target`. +// NOTE! This would be called with `symlink(req.Filepath, req.Target)` due to different semantics. +func (fs *root) symlink(target, linkpath string) error { + link := &memFile{ + modtime: time.Now(), + symlink: target, + } + + return fs.putfile(linkpath, link) +} + +func (fs *root) unlink(pathname string) error { + // does not follow symlinks! + file, err := fs.lfetch(pathname) + if err != nil { + return err + } + + if file.IsDir() { + // IEEE 1003.1: implementations may opt out of allowing the unlinking of directories. + // SFTP-v2: SSH_FXP_REMOVE may not remove directories. + return os.ErrInvalid + } + + // DO NOT use the file’s internal name. 
+	// because of hard-links files cannot have a single canonical name.
+	delete(fs.files, pathname)
+
+	return nil
+}
+
+type listerat []os.FileInfo
+
+// Modeled after strings.Reader's ReadAt() implementation
+func (f listerat) ListAt(ls []os.FileInfo, offset int64) (int, error) {
+	var n int
+	if offset >= int64(len(f)) {
+		return 0, io.EOF
+	}
+	n = copy(ls, f[offset:])
+	if n < len(ls) {
+		return n, io.EOF
+	}
+	return n, nil
+}
+
+func (fs *root) Filelist(r *Request) (ListerAt, error) {
+	if fs.mockErr != nil {
+		return nil, fs.mockErr
+	}
+	_ = r.WithContext(r.Context()) // initialize context for deadlock testing
+
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
+
+	switch r.Method {
+	case "List":
+		files, err := fs.readdir(r.Filepath)
+		if err != nil {
+			return nil, err
+		}
+		return listerat(files), nil
+
+	case "Stat":
+		file, err := fs.fetch(r.Filepath)
+		if err != nil {
+			return nil, err
+		}
+		return listerat{file}, nil
+	}
+
+	return nil, errors.New("unsupported")
+}
+
+func (fs *root) readdir(pathname string) ([]os.FileInfo, error) {
+	dir, err := fs.fetch(pathname)
+	if err != nil {
+		return nil, err
+	}
+
+	if !dir.IsDir() {
+		return nil, syscall.ENOTDIR
+	}
+
+	var files []os.FileInfo
+
+	for name, file := range fs.files {
+		if path.Dir(name) == dir.name {
+			files = append(files, file)
+		}
+	}
+
+	sort.Slice(files, func(i, j int) bool { return files[i].Name() < files[j].Name() })
+
+	return files, nil
+}
+
+func (fs *root) Readlink(pathname string) (string, error) {
+	file, err := fs.lfetch(pathname)
+	if err != nil {
+		return "", err
+	}
+
+	if file.symlink == "" {
+		return "", os.ErrInvalid
+	}
+
+	return file.symlink, nil
+}
+
+// implements LstatFileLister interface
+func (fs *root) Lstat(r *Request) (ListerAt, error) {
+	if fs.mockErr != nil {
+		return nil, fs.mockErr
+	}
+	_ = r.WithContext(r.Context()) // initialize context for deadlock testing
+
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
+
+	file, err := fs.lfetch(r.Filepath)
+	if err != nil {
+		return nil, err
+	}
+	return listerat{file}, nil
+}
+
+// In-memory file-system-y thing that the Handlers live on
+type root struct {
+	rootFile *memFile
+	mockErr  error
+
+	mu    sync.Mutex
+	files map[string]*memFile
+}
+
+// Set a mocked error that the next handler call will return.
+// Set to nil to reset for no error.
+func (fs *root) returnErr(err error) {
+	fs.mockErr = err
+}
+
+func (fs *root) lfetch(path string) (*memFile, error) {
+	if path == "/" {
+		return fs.rootFile, nil
+	}
+
+	file, ok := fs.files[path]
+	if file == nil {
+		if ok {
+			delete(fs.files, path)
+		}
+
+		return nil, os.ErrNotExist
+	}
+
+	return file, nil
+}
+
+// canonName returns the “canonical” name of a file, that is:
+// if the directory of the pathname is a symlink, it follows that symlink to the valid directory name.
+// this is relatively easy, since `dir.name` will be the only valid canonical path for a directory.
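+//
+// For example, if "/sym" is a symlink to the directory "/dir", then
+// canonName("/sym/file") follows the symlink while fetching the parent and
+// returns "/dir/file".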
+func (fs *root) canonName(pathname string) (string, error) { + dirname, filename := path.Dir(pathname), path.Base(pathname) + + dir, err := fs.fetch(dirname) + if err != nil { + return "", err + } + + if !dir.IsDir() { + return "", syscall.ENOTDIR + } + + return path.Join(dir.name, filename), nil +} + +func (fs *root) exists(path string) bool { + path, err := fs.canonName(path) + if err != nil { + return false + } + + _, err = fs.lfetch(path) + + return err != os.ErrNotExist +} + +func (fs *root) fetch(pathname string) (*memFile, error) { + file, err := fs.lfetch(pathname) + if err != nil { + return nil, err + } + + var count int + for file.symlink != "" { + if count++; count > maxSymlinkFollows { + return nil, errTooManySymlinks + } + + linkTarget := file.symlink + if !path.IsAbs(linkTarget) { + linkTarget = path.Join(path.Dir(file.name), linkTarget) + } + + file, err = fs.lfetch(linkTarget) + if err != nil { + return nil, err + } + } + + return file, nil +} + +// Implements os.FileInfo, io.ReaderAt and io.WriterAt interfaces. +// These are the 3 interfaces necessary for the Handlers. +// Implements the optional interface TransferError. +type memFile struct { + name string + modtime time.Time + symlink string + isdir bool + + mu sync.RWMutex + content []byte + err error +} + +// These are helper functions, they must be called while holding the memFile.mu mutex +func (f *memFile) size() int64 { return int64(len(f.content)) } +func (f *memFile) grow(n int64) { f.content = append(f.content, make([]byte, n)...) } + +// Have memFile fulfill os.FileInfo interface +func (f *memFile) Name() string { return path.Base(f.name) } +func (f *memFile) Size() int64 { + f.mu.Lock() + defer f.mu.Unlock() + + return f.size() +} +func (f *memFile) Mode() os.FileMode { + if f.isdir { + return os.FileMode(0755) | os.ModeDir + } + if f.symlink != "" { + return os.FileMode(0777) | os.ModeSymlink + } + return os.FileMode(0644) +} +func (f *memFile) ModTime() time.Time { return f.modtime } +func (f *memFile) IsDir() bool { return f.isdir } +func (f *memFile) Sys() interface{} { + return fakeFileInfoSys() +} + +func (f *memFile) ReadAt(b []byte, off int64) (int, error) { + f.mu.Lock() + defer f.mu.Unlock() + + if f.err != nil { + return 0, f.err + } + + if off < 0 { + return 0, errors.New("memFile.ReadAt: negative offset") + } + + if off >= f.size() { + return 0, io.EOF + } + + n := copy(b, f.content[off:]) + if n < len(b) { + return n, io.EOF + } + + return n, nil +} + +func (f *memFile) WriteAt(b []byte, off int64) (int, error) { + // fmt.Println(string(p), off) + // mimic write delays, should be optional + time.Sleep(time.Microsecond * time.Duration(len(b))) + + f.mu.Lock() + defer f.mu.Unlock() + + if f.err != nil { + return 0, f.err + } + + grow := int64(len(b)) + off - f.size() + if grow > 0 { + f.grow(grow) + } + + return copy(f.content[off:], b), nil +} + +func (f *memFile) Truncate(size int64) error { + f.mu.Lock() + defer f.mu.Unlock() + + if f.err != nil { + return f.err + } + + grow := size - f.size() + if grow <= 0 { + f.content = f.content[:size] + } else { + f.grow(grow) + } + + return nil +} + +func (f *memFile) TransferError(err error) { + f.mu.Lock() + defer f.mu.Unlock() + + f.err = err +} diff --git a/vendor/github.com/pkg/sftp/request-interfaces.go b/vendor/github.com/pkg/sftp/request-interfaces.go new file mode 100644 index 000000000..2090e3162 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-interfaces.go @@ -0,0 +1,157 @@ +package sftp + +import ( + "io" + "os" +) + +// 
WriterAtReaderAt defines the interface to return when a file is to
+// be opened for reading and writing
+type WriterAtReaderAt interface {
+	io.WriterAt
+	io.ReaderAt
+}
+
+// Interfaces are differentiated based on required returned values.
+// All input arguments are to be pulled from Request (the only arg).
+
+// The Handler interfaces all take the Request object as their only argument.
+// All the data you should need to handle the call are in the Request object.
+// The request.Method attribute is initially the most important one as it
+// determines which Handler gets called.
+
+// FileReader should return an io.ReaderAt for the filepath
+// Note in cases of an error, the error text will be sent to the client.
+// Called for Methods: Get
+type FileReader interface {
+	Fileread(*Request) (io.ReaderAt, error)
+}
+
+// FileWriter should return an io.WriterAt for the filepath.
+//
+// The request server code will call Close() on the returned io.WriterAt
+// object if an io.Closer type assertion succeeds.
+// Note in cases of an error, the error text will be sent to the client.
+// Note when receiving an Append flag it is important to not open files using
+// O_APPEND if you plan to use WriteAt, as they conflict.
+// Called for Methods: Put, Open
+type FileWriter interface {
+	Filewrite(*Request) (io.WriterAt, error)
+}
+
+// OpenFileWriter is a FileWriter that implements the generic OpenFile method.
+// You need to implement this optional interface if you want to be able
+// to read and write from/to the same handle.
+// Called for Methods: Open
+type OpenFileWriter interface {
+	FileWriter
+	OpenFile(*Request) (WriterAtReaderAt, error)
+}
+
+// FileCmder should return an error
+// Note in cases of an error, the error text will be sent to the client.
+// Called for Methods: Setstat, Rename, Rmdir, Mkdir, Link, Symlink, Remove
+type FileCmder interface {
+	Filecmd(*Request) error
+}
+
+// PosixRenameFileCmder is a FileCmder that implements the PosixRename method.
+// If this interface is implemented, PosixRename requests will call it;
+// otherwise they will be handled in the same way as Rename.
+type PosixRenameFileCmder interface {
+	FileCmder
+	PosixRename(*Request) error
+}
+
+// StatVFSFileCmder is a FileCmder that implements the StatVFS method.
+// You need to implement this interface if you want to handle statvfs requests.
+// Please also be sure that the statvfs@openssh.com extension is enabled
+type StatVFSFileCmder interface {
+	FileCmder
+	StatVFS(*Request) (*StatVFS, error)
+}
+
+// FileLister should return an object that fulfils the ListerAt interface
+// Note in cases of an error, the error text will be sent to the client.
+// Called for Methods: List, Stat, Readlink
+//
+// Since Filelist returns an os.FileInfo, this can make it non-ideal for implementing Readlink.
+// This is because the Name method defined by that interface is specified to return only the base name.
+// However, Readlink is required to be capable of returning essentially any arbitrary valid path relative or absolute.
+// In order to implement this more expressive requirement, implement [ReadlinkFileLister] which will then be used instead.
+type FileLister interface {
+	Filelist(*Request) (ListerAt, error)
+}
+
+// LstatFileLister is a FileLister that implements the Lstat method.
+// If this interface is implemented, Lstat requests will call it;
+// otherwise they will be handled in the same way as Stat.
+type LstatFileLister interface {
+	FileLister
+	Lstat(*Request) (ListerAt, error)
+}
+
+// RealPathFileLister is a FileLister that implements the Realpath method.
+// The built-in RealPath implementation does not resolve symbolic links.
+// By implementing this interface you can customize the returned path
+// and, for example, resolve symbolic links if needed for your use case.
+// You have to return an absolute POSIX path.
+//
+// Up to v1.13.5 the signature for the RealPath method was:
+//
+// # RealPath(string) string
+//
+// we have added a legacyRealPathFileLister that implements the old method
+// to ensure that your code does not break.
+// You should use the new method signature to avoid future issues
+type RealPathFileLister interface {
+	FileLister
+	RealPath(string) (string, error)
+}
+
+// ReadlinkFileLister is a FileLister that implements the Readlink method.
+// By implementing the Readlink method, it is possible to return any arbitrary valid path relative or absolute.
+// This allows giving a better response than via the default FileLister (which is limited to os.FileInfo, whose Name method should only return the base name of a file)
+type ReadlinkFileLister interface {
+	FileLister
+	Readlink(string) (string, error)
+}
+
+// This interface is here for backward compatibility only
+type legacyRealPathFileLister interface {
+	FileLister
+	RealPath(string) string
+}
+
+// NameLookupFileLister is a FileLister that implements the LookupUserName and LookupGroupName methods.
+// If this interface is implemented, then longname ls formatting will use these to convert usernames and groupnames.
+type NameLookupFileLister interface {
+	FileLister
+	LookupUserName(string) string
+	LookupGroupName(string) string
+}
+
+// ListerAt does for file lists what io.ReaderAt does for files, i.e. a []os.FileInfo buffer is passed to the ListAt function
+// and the entries that are populated in the buffer will be passed to the client.
+//
+// ListAt should return the number of entries copied and an io.EOF error if at end of list.
+// This is testable by comparing how many you copied to how many could be copied (eg. n < len(ls) below).
+// The copy() builtin is best for the copying.
+//
+// Uid and gid information will on unix systems be retrieved from [os.FileInfo.Sys]
+// if this function returns a [syscall.Stat_t] when called on a populated entry.
+// Alternatively, if the entry implements [FileInfoUidGid], it will be used for uid and gid information.
+//
+// If a populated entry implements [FileInfoExtendedData], extended attributes will also be returned to the client.
+//
+// Note in cases of an error, the error text will be sent to the client.
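+//
+// A minimal slice-backed implementation (a sketch, adapted from the listerat
+// type used by the in-memory example handlers in this package):
+//
+//	type listerat []os.FileInfo
+//
+//	func (l listerat) ListAt(ls []os.FileInfo, offset int64) (int, error) {
+//		if offset >= int64(len(l)) {
+//			return 0, io.EOF
+//		}
+//		n := copy(ls, l[offset:])
+//		if n < len(ls) {
+//			return n, io.EOF
+//		}
+//		return n, nil
+//	}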
+type ListerAt interface {
+	ListAt([]os.FileInfo, int64) (int, error)
+}
+
+// TransferError is an optional interface that readerAt and writerAt
+// can implement to be notified about the error causing Serve() to exit
+// with the request still open
+type TransferError interface {
+	TransferError(err error)
+}
diff --git a/vendor/github.com/pkg/sftp/request-plan9.go b/vendor/github.com/pkg/sftp/request-plan9.go
new file mode 100644
index 000000000..38f91bcde
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request-plan9.go
@@ -0,0 +1,16 @@
+//go:build plan9
+// +build plan9
+
+package sftp
+
+import (
+	"syscall"
+)
+
+func fakeFileInfoSys() interface{} {
+	return &syscall.Dir{}
+}
+
+func testOsSys(sys interface{}) error {
+	return nil
+}
diff --git a/vendor/github.com/pkg/sftp/request-readme.md b/vendor/github.com/pkg/sftp/request-readme.md
new file mode 100644
index 000000000..f8b81f3aa
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request-readme.md
@@ -0,0 +1,53 @@
+# Request Based SFTP API
+
+The request based API allows for custom backends in a way similar to the http
+package. In order to create a backend you need to implement 4 handler
+interfaces; one for reading, one for writing, one for misc commands and one
+for listing files. Each has one required method; in each case the method takes
+the Request as its only parameter and returns something different. These 4
+interfaces are enough to handle all the SFTP traffic in a simplified manner.
+
+The Request structure has 5 public fields which you will deal with.
+
+- Method (string) - string name of incoming call
+- Filepath (string) - POSIX path of file to act on
+- Flags (uint32) - 32bit bitmask value of file open/create flags
+- Attrs ([]byte) - byte string of file attribute data
+- Target (string) - target path for renames and sym-links
+
+Below are the methods and a brief description of what they need to do.
+
+### Fileread(*Request) (io.ReaderAt, error)
+
+Handler for "Get" method and returns an io.ReaderAt for the file which the
+server then sends to the client.
+
+### Filewrite(*Request) (io.WriterAt, error)
+
+Handler for "Put" method and returns an io.WriterAt for the file which the
+server then writes the uploaded file to. The file opening "pflags" are
+currently preserved in the Request.Flags field as a 32bit bitmask value. See
+the [SFTP spec](https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-6.3)
+for details.
+
+### Filecmd(*Request) error
+
+Handles "Setstat", "Rename", "Rmdir", "Mkdir", "Link", "Symlink" and "Remove"
+methods. Makes the appropriate changes and returns nil for success or a
+filesystem-like error (eg. os.ErrNotExist). The attributes are currently
+propagated in their raw form ([]byte) and will need to be unmarshalled to be
+useful. See the respond method on sshFxpSetstatPacket for an example of how
+you might want to do this.
+
+### Filelist(*Request) (ListerAt, error)
+
+Handles "List", "Stat" and "Readlink" methods. Gathers/creates FileInfo
+structs with the data on the files and returns them through a ListerAt
+(a list of 1 for Stat and Readlink).
+
+## TODO
+
+- Add support for API users to see trace/debugging info of what is going on
+inside SFTP server.
+- Unmarshal the file attributes into a structure on the Request object.
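+
+## Minimal Example
+
+A sketch of a read-only backend wired into a RequestServer (the `roFS` type,
+the canned payload and the channel plumbing are illustrative, not part of
+this package):
+
+```go
+package example
+
+import (
+	"io"
+	"strings"
+
+	"github.com/pkg/sftp"
+)
+
+// roFS serves a canned payload for every path and refuses writes.
+type roFS struct{}
+
+func (roFS) Fileread(r *sftp.Request) (io.ReaderAt, error) {
+	return strings.NewReader("hello from " + r.Filepath + "\n"), nil
+}
+
+func (roFS) Filewrite(r *sftp.Request) (io.WriterAt, error) {
+	return nil, sftp.ErrSSHFxPermissionDenied
+}
+
+func (roFS) Filecmd(r *sftp.Request) error {
+	return sftp.ErrSSHFxPermissionDenied
+}
+
+func (roFS) Filelist(r *sftp.Request) (sftp.ListerAt, error) {
+	return nil, sftp.ErrSSHFxOpUnsupported
+}
+
+// serve runs the request server over any ReadWriteCloser, typically an
+// incoming "sftp" subsystem channel from golang.org/x/crypto/ssh.
+func serve(ch io.ReadWriteCloser) error {
+	fs := roFS{}
+	srv := sftp.NewRequestServer(ch, sftp.Handlers{
+		FileGet:  fs,
+		FilePut:  fs,
+		FileCmd:  fs,
+		FileList: fs,
+	})
+	defer srv.Close()
+	if err := srv.Serve(); err != nil && err != io.EOF {
+		return err
+	}
+	return nil
+}
+```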
diff --git a/vendor/github.com/pkg/sftp/request-server.go b/vendor/github.com/pkg/sftp/request-server.go
new file mode 100644
index 000000000..7a99db648
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request-server.go
@@ -0,0 +1,337 @@
+package sftp
+
+import (
+	"context"
+	"errors"
+	"io"
+	"path"
+	"path/filepath"
+	"strconv"
+	"sync"
+)
+
+var maxTxPacket uint32 = 1 << 15
+
+// Handlers contains the 4 SFTP server request handlers.
+type Handlers struct {
+	FileGet  FileReader
+	FilePut  FileWriter
+	FileCmd  FileCmder
+	FileList FileLister
+}
+
+// RequestServer abstracts the sftp protocol with an http request-like protocol
+type RequestServer struct {
+	Handlers Handlers
+
+	*serverConn
+	pktMgr *packetManager
+
+	startDirectory string
+
+	mu           sync.RWMutex
+	handleCount  int
+	openRequests map[string]*Request
+}
+
+// A RequestServerOption is a function which applies configuration to a RequestServer.
+type RequestServerOption func(*RequestServer)
+
+// WithRSAllocator enables the allocator.
+// After processing a packet we keep in memory the allocated slices
+// and we reuse them for new packets.
+// The allocator is experimental
+func WithRSAllocator() RequestServerOption {
+	return func(rs *RequestServer) {
+		alloc := newAllocator()
+		rs.pktMgr.alloc = alloc
+		rs.conn.alloc = alloc
+	}
+}
+
+// WithStartDirectory sets a start directory to use as base for relative paths.
+// If unset the default is "/"
+func WithStartDirectory(startDirectory string) RequestServerOption {
+	return func(rs *RequestServer) {
+		rs.startDirectory = cleanPath(startDirectory)
+	}
+}
+
+// NewRequestServer creates/allocates/returns new RequestServer.
+// Normally there will be one server per user-session.
+func NewRequestServer(rwc io.ReadWriteCloser, h Handlers, options ...RequestServerOption) *RequestServer {
+	svrConn := &serverConn{
+		conn: conn{
+			Reader:      rwc,
+			WriteCloser: rwc,
+		},
+	}
+	rs := &RequestServer{
+		Handlers: h,
+
+		serverConn: svrConn,
+		pktMgr:     newPktMgr(svrConn),
+
+		startDirectory: "/",
+
+		openRequests: make(map[string]*Request),
+	}
+
+	for _, o := range options {
+		o(rs)
+	}
+	return rs
+}
+
+// New Open packet/Request
+func (rs *RequestServer) nextRequest(r *Request) string {
+	rs.mu.Lock()
+	defer rs.mu.Unlock()
+
+	rs.handleCount++
+
+	r.handle = strconv.Itoa(rs.handleCount)
+	rs.openRequests[r.handle] = r
+
+	return r.handle
+}
+
+// Returns Request from openRequests, bool is false if it is missing.
+//
+// The Requests in openRequests work essentially as open file descriptors that
+// you can do different things with. What you are doing with it are denoted by
+// the first packet of that type (read/write/etc).
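+// For example, an SSH_FXP_OPEN is assigned a handle via nextRequest; later
+// READ/WRITE/FSTAT packets carrying that handle are resolved back to the
+// same Request here, until SSH_FXP_CLOSE drops it via closeRequest.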
+func (rs *RequestServer) getRequest(handle string) (*Request, bool) { + rs.mu.RLock() + defer rs.mu.RUnlock() + + r, ok := rs.openRequests[handle] + return r, ok +} + +// Close the Request and clear from openRequests map +func (rs *RequestServer) closeRequest(handle string) error { + rs.mu.Lock() + defer rs.mu.Unlock() + + if r, ok := rs.openRequests[handle]; ok { + delete(rs.openRequests, handle) + return r.close() + } + + return EBADF +} + +// Close the read/write/closer to trigger exiting the main server loop +func (rs *RequestServer) Close() error { return rs.conn.Close() } + +func (rs *RequestServer) serveLoop(pktChan chan<- orderedRequest) error { + defer close(pktChan) // shuts down sftpServerWorkers + + var err error + var pkt requestPacket + var pktType uint8 + var pktBytes []byte + + for { + pktType, pktBytes, err = rs.serverConn.recvPacket(rs.pktMgr.getNextOrderID()) + if err != nil { + // we don't care about releasing allocated pages here, the server will quit and the allocator freed + return err + } + + pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes}) + if err != nil { + switch { + case errors.Is(err, errUnknownExtendedPacket): + // do nothing + default: + debug("makePacket err: %v", err) + rs.conn.Close() // shuts down recvPacket + return err + } + } + + pktChan <- rs.pktMgr.newOrderedRequest(pkt) + } +} + +// Serve requests for user session +func (rs *RequestServer) Serve() error { + defer func() { + if rs.pktMgr.alloc != nil { + rs.pktMgr.alloc.Free() + } + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var wg sync.WaitGroup + runWorker := func(ch chan orderedRequest) { + wg.Add(1) + go func() { + defer wg.Done() + if err := rs.packetWorker(ctx, ch); err != nil { + rs.conn.Close() // shuts down recvPacket + } + }() + } + pktChan := rs.pktMgr.workerChan(runWorker) + + err := rs.serveLoop(pktChan) + + wg.Wait() // wait for all workers to exit + + rs.mu.Lock() + defer rs.mu.Unlock() + + // make sure all open requests are properly closed + // (eg. possible on dropped connections, client crashes, etc.) 
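+	// Each request is first notified of the failure (so handler objects
+	// implementing TransferError can record the cause) and then closed.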
+ for handle, req := range rs.openRequests { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + req.transferError(err) + + delete(rs.openRequests, handle) + req.close() + } + + return err +} + +func (rs *RequestServer) packetWorker(ctx context.Context, pktChan chan orderedRequest) error { + for pkt := range pktChan { + orderID := pkt.orderID() + if epkt, ok := pkt.requestPacket.(*sshFxpExtendedPacket); ok { + if epkt.SpecificPacket != nil { + pkt.requestPacket = epkt.SpecificPacket + } + } + + var rpkt responsePacket + switch pkt := pkt.requestPacket.(type) { + case *sshFxInitPacket: + rpkt = &sshFxVersionPacket{Version: sftpProtocolVersion, Extensions: sftpExtensions} + case *sshFxpClosePacket: + handle := pkt.getHandle() + rpkt = statusFromError(pkt.ID, rs.closeRequest(handle)) + case *sshFxpRealpathPacket: + var realPath string + var err error + + switch pather := rs.Handlers.FileList.(type) { + case RealPathFileLister: + realPath, err = pather.RealPath(pkt.getPath()) + case legacyRealPathFileLister: + realPath = pather.RealPath(pkt.getPath()) + default: + realPath = cleanPathWithBase(rs.startDirectory, pkt.getPath()) + } + if err != nil { + rpkt = statusFromError(pkt.ID, err) + } else { + rpkt = cleanPacketPath(pkt, realPath) + } + case *sshFxpOpendirPacket: + request := requestFromPacket(ctx, pkt, rs.startDirectory) + handle := rs.nextRequest(request) + rpkt = request.opendir(rs.Handlers, pkt) + if _, ok := rpkt.(*sshFxpHandlePacket); !ok { + // if we return an error we have to remove the handle from the active ones + rs.closeRequest(handle) + } + case *sshFxpOpenPacket: + request := requestFromPacket(ctx, pkt, rs.startDirectory) + handle := rs.nextRequest(request) + rpkt = request.open(rs.Handlers, pkt) + if _, ok := rpkt.(*sshFxpHandlePacket); !ok { + // if we return an error we have to remove the handle from the active ones + rs.closeRequest(handle) + } + case *sshFxpFstatPacket: + handle := pkt.getHandle() + request, ok := rs.getRequest(handle) + if !ok { + rpkt = statusFromError(pkt.ID, EBADF) + } else { + request = &Request{ + Method: "Stat", + Filepath: cleanPathWithBase(rs.startDirectory, request.Filepath), + } + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + } + case *sshFxpFsetstatPacket: + handle := pkt.getHandle() + request, ok := rs.getRequest(handle) + if !ok { + rpkt = statusFromError(pkt.ID, EBADF) + } else { + request = &Request{ + Method: "Setstat", + Filepath: cleanPathWithBase(rs.startDirectory, request.Filepath), + } + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + } + case *sshFxpExtendedPacketPosixRename: + request := &Request{ + Method: "PosixRename", + Filepath: cleanPathWithBase(rs.startDirectory, pkt.Oldpath), + Target: cleanPathWithBase(rs.startDirectory, pkt.Newpath), + } + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + case *sshFxpExtendedPacketStatVFS: + request := &Request{ + Method: "StatVFS", + Filepath: cleanPathWithBase(rs.startDirectory, pkt.Path), + } + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + case hasHandle: + handle := pkt.getHandle() + request, ok := rs.getRequest(handle) + if !ok { + rpkt = statusFromError(pkt.id(), EBADF) + } else { + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + } + case hasPath: + request := requestFromPacket(ctx, pkt, rs.startDirectory) + rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) + request.close() + default: + rpkt = statusFromError(pkt.id(), ErrSSHFxOpUnsupported) + } + + rs.pktMgr.readyPacket( + 
rs.pktMgr.newOrderedResponse(rpkt, orderID))
+	}
+	return nil
+}
+
+// clean and return name packet for file
+func cleanPacketPath(pkt *sshFxpRealpathPacket, realPath string) responsePacket {
+	return &sshFxpNamePacket{
+		ID: pkt.id(),
+		NameAttrs: []*sshFxpNameAttr{
+			{
+				Name:     realPath,
+				LongName: realPath,
+				Attrs:    emptyFileStat,
+			},
+		},
+	}
+}
+
+// Makes sure we have a clean POSIX (/) absolute path to work with
+func cleanPath(p string) string {
+	return cleanPathWithBase("/", p)
+}
+
+func cleanPathWithBase(base, p string) string {
+	p = filepath.ToSlash(filepath.Clean(p))
+	if !path.IsAbs(p) {
+		return path.Join(base, p)
+	}
+	return p
+}
diff --git a/vendor/github.com/pkg/sftp/request-unix.go b/vendor/github.com/pkg/sftp/request-unix.go
new file mode 100644
index 000000000..e3e037d60
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request-unix.go
@@ -0,0 +1,24 @@
+//go:build !windows && !plan9
+// +build !windows,!plan9
+
+package sftp
+
+import (
+	"errors"
+	"syscall"
+)
+
+func fakeFileInfoSys() interface{} {
+	return &syscall.Stat_t{Uid: 65534, Gid: 65534}
+}
+
+func testOsSys(sys interface{}) error {
+	fstat := sys.(*FileStat)
+	if fstat.UID != uint32(65534) {
+		return errors.New("Uid failed to match")
+	}
+	if fstat.GID != uint32(65534) {
+		return errors.New("Gid failed to match")
+	}
+	return nil
+}
diff --git a/vendor/github.com/pkg/sftp/request.go b/vendor/github.com/pkg/sftp/request.go
new file mode 100644
index 000000000..57d788df6
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/request.go
@@ -0,0 +1,653 @@
+package sftp
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"sync"
+	"syscall"
+)
+
+// MaxFilelist is the max number of files to return in a readdir batch.
+var MaxFilelist int64 = 100
+
+// state encapsulates the reader/writer/readdir from handlers.
+type state struct {
+	mu sync.RWMutex
+
+	writerAt         io.WriterAt
+	readerAt         io.ReaderAt
+	writerAtReaderAt WriterAtReaderAt
+	listerAt         ListerAt
+	lsoffset         int64
+}
+
+// copy returns a shallow copy of the state.
+// This is broken out to specific fields,
+// because we have to copy around the mutex in state.
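+// (A sync.RWMutex must not be copied after first use; copying field by field
+// leaves the new state with its own zero-valued mutex, which also keeps
+// go vet's copylocks check quiet.)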
+func (s *state) copy() state { + s.mu.RLock() + defer s.mu.RUnlock() + + return state{ + writerAt: s.writerAt, + readerAt: s.readerAt, + writerAtReaderAt: s.writerAtReaderAt, + listerAt: s.listerAt, + lsoffset: s.lsoffset, + } +} + +func (s *state) setReaderAt(rd io.ReaderAt) { + s.mu.Lock() + defer s.mu.Unlock() + + s.readerAt = rd +} + +func (s *state) getReaderAt() io.ReaderAt { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.readerAt +} + +func (s *state) setWriterAt(rd io.WriterAt) { + s.mu.Lock() + defer s.mu.Unlock() + + s.writerAt = rd +} + +func (s *state) getWriterAt() io.WriterAt { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.writerAt +} + +func (s *state) setWriterAtReaderAt(rw WriterAtReaderAt) { + s.mu.Lock() + defer s.mu.Unlock() + + s.writerAtReaderAt = rw +} + +func (s *state) getWriterAtReaderAt() WriterAtReaderAt { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.writerAtReaderAt +} + +func (s *state) getAllReaderWriters() (io.ReaderAt, io.WriterAt, WriterAtReaderAt) { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.readerAt, s.writerAt, s.writerAtReaderAt +} + +// Returns current offset for file list +func (s *state) lsNext() int64 { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.lsoffset +} + +// Increases next offset +func (s *state) lsInc(offset int64) { + s.mu.Lock() + defer s.mu.Unlock() + + s.lsoffset += offset +} + +// manage file read/write state +func (s *state) setListerAt(la ListerAt) { + s.mu.Lock() + defer s.mu.Unlock() + + s.listerAt = la +} + +func (s *state) getListerAt() ListerAt { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.listerAt +} + +// Request contains the data and state for the incoming service request. +type Request struct { + // Get, Put, Setstat, Stat, Rename, Remove + // Rmdir, Mkdir, List, Readlink, Link, Symlink + Method string + Filepath string + Flags uint32 + Attrs []byte // convert to sub-struct + Target string // for renames and sym-links + handle string + + // reader/writer/readdir from handlers + state + + // context lasts duration of request + ctx context.Context + cancelCtx context.CancelFunc +} + +// NewRequest creates a new Request object. +func NewRequest(method, path string) *Request { + return &Request{ + Method: method, + Filepath: cleanPath(path), + } +} + +// copy returns a shallow copy of existing request. +// This is broken out to specific fields, +// because we have to copy around the mutex in state. +func (r *Request) copy() *Request { + return &Request{ + Method: r.Method, + Filepath: r.Filepath, + Flags: r.Flags, + Attrs: r.Attrs, + Target: r.Target, + handle: r.handle, + + state: r.state.copy(), + + ctx: r.ctx, + cancelCtx: r.cancelCtx, + } +} + +// New Request initialized based on packet data +func requestFromPacket(ctx context.Context, pkt hasPath, baseDir string) *Request { + request := &Request{ + Method: requestMethod(pkt), + Filepath: cleanPathWithBase(baseDir, pkt.getPath()), + } + request.ctx, request.cancelCtx = context.WithCancel(ctx) + + switch p := pkt.(type) { + case *sshFxpOpenPacket: + request.Flags = p.Pflags + case *sshFxpSetstatPacket: + request.Flags = p.Flags + request.Attrs = p.Attrs.([]byte) + case *sshFxpRenamePacket: + request.Target = cleanPathWithBase(baseDir, p.Newpath) + case *sshFxpSymlinkPacket: + // NOTE: given a POSIX compliant signature: symlink(target, linkpath string) + // this makes Request.Target the linkpath, and Request.Filepath the target. 
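+		// (That is: the handler-side call becomes symlink(Filepath, Target),
+		// mirroring POSIX symlink(target, linkpath); see fs.symlink in
+		// request-example.go.)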
+		request.Target = cleanPathWithBase(baseDir, p.Linkpath)
+		request.Filepath = p.Targetpath
+	case *sshFxpExtendedPacketHardlink:
+		request.Target = cleanPathWithBase(baseDir, p.Newpath)
+	}
+	return request
+}
+
+// Context returns the request's context. To change the context,
+// use WithContext.
+//
+// The returned context is always non-nil; it defaults to the
+// background context.
+//
+// For incoming server requests, the context is canceled when the
+// request is complete or the client's connection closes.
+func (r *Request) Context() context.Context {
+	if r.ctx != nil {
+		return r.ctx
+	}
+	return context.Background()
+}
+
+// WithContext returns a copy of r with its context changed to ctx.
+// The provided ctx must be non-nil.
+func (r *Request) WithContext(ctx context.Context) *Request {
+	if ctx == nil {
+		panic("nil context")
+	}
+	r2 := r.copy()
+	r2.ctx = ctx
+	r2.cancelCtx = nil
+	return r2
+}
+
+// Close reader/writer if possible
+func (r *Request) close() error {
+	defer func() {
+		if r.cancelCtx != nil {
+			r.cancelCtx()
+		}
+	}()
+
+	rd, wr, rw := r.getAllReaderWriters()
+
+	var err error
+
+	// Close errors on a Writer are far more likely to be the important ones,
+	// as they can indicate that data was lost.
+	if c, ok := wr.(io.Closer); ok {
+		if err2 := c.Close(); err == nil {
+			// update error if it is still nil
+			err = err2
+		}
+	}
+
+	if c, ok := rw.(io.Closer); ok {
+		if err2 := c.Close(); err == nil {
+			// update error if it is still nil
+			err = err2
+
+			r.setWriterAtReaderAt(nil)
+		}
+	}
+
+	if c, ok := rd.(io.Closer); ok {
+		if err2 := c.Close(); err == nil {
+			// update error if it is still nil
+			err = err2
+		}
+	}
+
+	return err
+}
+
+// Notify transfer error if any
+func (r *Request) transferError(err error) {
+	if err == nil {
+		return
+	}
+
+	rd, wr, rw := r.getAllReaderWriters()
+
+	if t, ok := wr.(TransferError); ok {
+		t.TransferError(err)
+	}
+
+	if t, ok := rw.(TransferError); ok {
+		t.TransferError(err)
+	}
+
+	if t, ok := rd.(TransferError); ok {
+		t.TransferError(err)
+	}
+}
+
+// called from worker to handle packet/request
+func (r *Request) call(handlers Handlers, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket {
+	switch r.Method {
+	case "Get":
+		return fileget(handlers.FileGet, r, pkt, alloc, orderID)
+	case "Put":
+		return fileput(handlers.FilePut, r, pkt, alloc, orderID)
+	case "Open":
+		return fileputget(handlers.FilePut, r, pkt, alloc, orderID)
+	case "Setstat", "Rename", "Rmdir", "Mkdir", "Link", "Symlink", "Remove", "PosixRename", "StatVFS":
+		return filecmd(handlers.FileCmd, r, pkt)
+	case "List":
+		return filelist(handlers.FileList, r, pkt)
+	case "Stat", "Lstat":
+		return filestat(handlers.FileList, r, pkt)
+	case "Readlink":
+		if readlinkFileLister, ok := handlers.FileList.(ReadlinkFileLister); ok {
+			return readlink(readlinkFileLister, r, pkt)
+		}
+		return filestat(handlers.FileList, r, pkt)
+	default:
+		return statusFromError(pkt.id(), fmt.Errorf("unexpected method: %s", r.Method))
+	}
+}
+
+// Additional initialization for Open packets
+func (r *Request) open(h Handlers, pkt requestPacket) responsePacket {
+	flags := r.Pflags()
+
+	id := pkt.id()
+
+	switch {
+	case flags.Write, flags.Append, flags.Creat, flags.Trunc:
+		if flags.Read {
+			if openFileWriter, ok := h.FilePut.(OpenFileWriter); ok {
+				r.Method = "Open"
+				rw, err := openFileWriter.OpenFile(r)
+				if err != nil {
+					return statusFromError(id, err)
+				}
+
+				r.setWriterAtReaderAt(rw)
+
+				return &sshFxpHandlePacket{
+					ID:     id,
+					Handle: r.handle,
+				}
+			}
+		}
+
+		r.Method = "Put"
+		wr, err := h.FilePut.Filewrite(r)
+		if err != nil {
+			return statusFromError(id, err)
+		}
+
+		r.setWriterAt(wr)
+
+	case flags.Read:
+		r.Method = "Get"
+		rd, err := h.FileGet.Fileread(r)
+		if err != nil {
+			return statusFromError(id, err)
+		}
+
+		r.setReaderAt(rd)
+
+	default:
+		return statusFromError(id, errors.New("bad file flags"))
+	}
+
+	return &sshFxpHandlePacket{
+		ID:     id,
+		Handle: r.handle,
+	}
+}
+
+func (r *Request) opendir(h Handlers, pkt requestPacket) responsePacket {
+	r.Method = "List"
+	la, err := h.FileList.Filelist(r)
+	if err != nil {
+		return statusFromError(pkt.id(), wrapPathError(r.Filepath, err))
+	}
+
+	r.setListerAt(la)
+
+	return &sshFxpHandlePacket{
+		ID:     pkt.id(),
+		Handle: r.handle,
+	}
+}
+
+// wrap FileReader handler
+func fileget(h FileReader, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket {
+	rd := r.getReaderAt()
+	if rd == nil {
+		return statusFromError(pkt.id(), errors.New("unexpected read packet"))
+	}
+
+	data, offset, _ := packetData(pkt, alloc, orderID)
+
+	n, err := rd.ReadAt(data, offset)
+	// only return EOF error if no data left to read
+	if err != nil && (err != io.EOF || n == 0) {
+		return statusFromError(pkt.id(), err)
+	}
+
+	return &sshFxpDataPacket{
+		ID:     pkt.id(),
+		Length: uint32(n),
+		Data:   data[:n],
+	}
+}
+
+// wrap FileWriter handler
+func fileput(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket {
+	wr := r.getWriterAt()
+	if wr == nil {
+		return statusFromError(pkt.id(), errors.New("unexpected write packet"))
+	}
+
+	data, offset, _ := packetData(pkt, alloc, orderID)
+
+	_, err := wr.WriteAt(data, offset)
+	return statusFromError(pkt.id(), err)
+}
+
+// wrap OpenFileWriter handler
+func fileputget(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket {
+	rw := r.getWriterAtReaderAt()
+	if rw == nil {
+		return statusFromError(pkt.id(), errors.New("unexpected write and read packet"))
+	}
+
+	switch p := pkt.(type) {
+	case *sshFxpReadPacket:
+		data, offset := p.getDataSlice(alloc, orderID), int64(p.Offset)
+
+		n, err := rw.ReadAt(data, offset)
+		// only return EOF error if no data left to read
+		if err != nil && (err != io.EOF || n == 0) {
+			return statusFromError(pkt.id(), err)
+		}
+
+		return &sshFxpDataPacket{
+			ID:     pkt.id(),
+			Length: uint32(n),
+			Data:   data[:n],
+		}
+
+	case *sshFxpWritePacket:
+		data, offset := p.Data, int64(p.Offset)
+
+		_, err := rw.WriteAt(data, offset)
+		return statusFromError(pkt.id(), err)
+
+	default:
+		return statusFromError(pkt.id(), errors.New("unexpected packet type for read or write"))
+	}
+}
+
+// file data for additional read/write packets
+func packetData(p requestPacket, alloc *allocator, orderID uint32) (data []byte, offset int64, length uint32) {
+	switch p := p.(type) {
+	case *sshFxpReadPacket:
+		return p.getDataSlice(alloc, orderID), int64(p.Offset), p.Len
+	case *sshFxpWritePacket:
+		return p.Data, int64(p.Offset), p.Length
+	}
+	return
+}
+
+// wrap FileCmder handler
+func filecmd(h FileCmder, r *Request, pkt requestPacket) responsePacket {
+	switch p := pkt.(type) {
+	case *sshFxpFsetstatPacket:
+		r.Flags = p.Flags
+		r.Attrs = p.Attrs.([]byte)
+	}
+
+	switch r.Method {
+	case "PosixRename":
+		if posixRenamer, ok := h.(PosixRenameFileCmder); ok {
+			err := posixRenamer.PosixRename(r)
+			return statusFromError(pkt.id(), err)
+		}
+
+		// PosixRenameFileCmder not implemented; handle this request as a Rename
+		r.Method = "Rename"
+		err := h.Filecmd(r)
+		return statusFromError(pkt.id(), err)
+
+	case "StatVFS":
+		if statVFSCmdr, ok := h.(StatVFSFileCmder); ok {
+			stat, err := statVFSCmdr.StatVFS(r)
+			if err != nil {
+				return statusFromError(pkt.id(), err)
+			}
+			stat.ID = pkt.id()
+			return stat
+		}
+
+		return statusFromError(pkt.id(), ErrSSHFxOpUnsupported)
+	}
+
+	err := h.Filecmd(r)
+	return statusFromError(pkt.id(), err)
+}
+
+// wrap FileLister handler
+func filelist(h FileLister, r *Request, pkt requestPacket) responsePacket {
+	lister := r.getListerAt()
+	if lister == nil {
+		return statusFromError(pkt.id(), errors.New("unexpected dir packet"))
+	}
+
+	offset := r.lsNext()
+	finfo := make([]os.FileInfo, MaxFilelist)
+	n, err := lister.ListAt(finfo, offset)
+	r.lsInc(int64(n))
+	// ignore EOF as we only return it when there are no results
+	finfo = finfo[:n] // avoid need for nil tests below
+
+	switch r.Method {
+	case "List":
+		if err != nil && (err != io.EOF || n == 0) {
+			return statusFromError(pkt.id(), err)
+		}
+
+		nameAttrs := make([]*sshFxpNameAttr, 0, len(finfo))
+
+		// If the type conversion fails, we get untyped `nil`,
+		// which is handled by not looking up any names.
+		idLookup, _ := h.(NameLookupFileLister)
+
+		for _, fi := range finfo {
+			nameAttrs = append(nameAttrs, &sshFxpNameAttr{
+				Name:     fi.Name(),
+				LongName: runLs(idLookup, fi),
+				Attrs:    []interface{}{fi},
+			})
+		}
+
+		return &sshFxpNamePacket{
+			ID:        pkt.id(),
+			NameAttrs: nameAttrs,
+		}
+
+	default:
+		err = fmt.Errorf("unexpected method: %s", r.Method)
+		return statusFromError(pkt.id(), err)
+	}
+}
+
+func filestat(h FileLister, r *Request, pkt requestPacket) responsePacket {
+	var lister ListerAt
+	var err error
+
+	if r.Method == "Lstat" {
+		if lstatFileLister, ok := h.(LstatFileLister); ok {
+			lister, err = lstatFileLister.Lstat(r)
+		} else {
+			// LstatFileLister not implemented; handle this request as a Stat
+			r.Method = "Stat"
+			lister, err = h.Filelist(r)
+		}
+	} else {
+		lister, err = h.Filelist(r)
+	}
+	if err != nil {
+		return statusFromError(pkt.id(), err)
+	}
+	finfo := make([]os.FileInfo, 1)
+	n, err := lister.ListAt(finfo, 0)
+	finfo = finfo[:n] // avoid need for nil tests below
+
+	switch r.Method {
+	case "Stat", "Lstat":
+		if err != nil && err != io.EOF {
+			return statusFromError(pkt.id(), err)
+		}
+		if n == 0 {
+			err = &os.PathError{
+				Op:   strings.ToLower(r.Method),
+				Path: r.Filepath,
+				Err:  syscall.ENOENT,
+			}
+			return statusFromError(pkt.id(), err)
+		}
+		return &sshFxpStatResponse{
+			ID:   pkt.id(),
+			info: finfo[0],
+		}
+	case "Readlink":
+		if err != nil && err != io.EOF {
+			return statusFromError(pkt.id(), err)
+		}
+		if n == 0 {
+			err = &os.PathError{
+				Op:   "readlink",
+				Path: r.Filepath,
+				Err:  syscall.ENOENT,
+			}
+			return statusFromError(pkt.id(), err)
+		}
+		filename := finfo[0].Name()
+		return &sshFxpNamePacket{
+			ID: pkt.id(),
+			NameAttrs: []*sshFxpNameAttr{
+				{
+					Name:     filename,
+					LongName: filename,
+					Attrs:    emptyFileStat,
+				},
+			},
+		}
+	default:
+		err = fmt.Errorf("unexpected method: %s", r.Method)
+		return statusFromError(pkt.id(), err)
+	}
+}
+
+func readlink(readlinkFileLister ReadlinkFileLister, r *Request, pkt requestPacket) responsePacket {
+	resolved, err := readlinkFileLister.Readlink(r.Filepath)
+	if err != nil {
+		return statusFromError(pkt.id(), err)
+	}
+	return &sshFxpNamePacket{
+		ID: pkt.id(),
+		NameAttrs: []*sshFxpNameAttr{
+			{
+				Name:     resolved,
+				LongName: resolved,
+				Attrs:    emptyFileStat,
+			},
+		},
+	}
+}
+
+// requestMethod returns the method string for the given packet type.
+func requestMethod(p requestPacket)
(method string) { + switch p.(type) { + case *sshFxpReadPacket, *sshFxpWritePacket, *sshFxpOpenPacket: + // set in open() above + case *sshFxpOpendirPacket, *sshFxpReaddirPacket: + // set in opendir() above + case *sshFxpSetstatPacket, *sshFxpFsetstatPacket: + method = "Setstat" + case *sshFxpRenamePacket: + method = "Rename" + case *sshFxpSymlinkPacket: + method = "Symlink" + case *sshFxpRemovePacket: + method = "Remove" + case *sshFxpStatPacket, *sshFxpFstatPacket: + method = "Stat" + case *sshFxpLstatPacket: + method = "Lstat" + case *sshFxpRmdirPacket: + method = "Rmdir" + case *sshFxpReadlinkPacket: + method = "Readlink" + case *sshFxpMkdirPacket: + method = "Mkdir" + case *sshFxpExtendedPacketHardlink: + method = "Link" + } + return method +} diff --git a/vendor/github.com/pkg/sftp/request_windows.go b/vendor/github.com/pkg/sftp/request_windows.go new file mode 100644 index 000000000..bd1d68645 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request_windows.go @@ -0,0 +1,13 @@ +package sftp + +import ( + "syscall" +) + +func fakeFileInfoSys() interface{} { + return syscall.Win32FileAttributeData{} +} + +func testOsSys(sys interface{}) error { + return nil +} diff --git a/vendor/github.com/pkg/sftp/server.go b/vendor/github.com/pkg/sftp/server.go new file mode 100644 index 000000000..2e419f590 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server.go @@ -0,0 +1,634 @@ +package sftp + +// sftp server counterpart + +import ( + "encoding" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "sync" + "syscall" + "time" +) + +const ( + // SftpServerWorkerCount defines the number of workers for the SFTP server + SftpServerWorkerCount = 8 +) + +// Server is an SSH File Transfer Protocol (sftp) server. +// This is intended to provide the sftp subsystem to an ssh server daemon. +// This implementation currently supports most of sftp server protocol version 3, +// as specified at https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt +type Server struct { + *serverConn + debugStream io.Writer + readOnly bool + pktMgr *packetManager + openFiles map[string]*os.File + openFilesLock sync.RWMutex + handleCount int + workDir string +} + +func (svr *Server) nextHandle(f *os.File) string { + svr.openFilesLock.Lock() + defer svr.openFilesLock.Unlock() + svr.handleCount++ + handle := strconv.Itoa(svr.handleCount) + svr.openFiles[handle] = f + return handle +} + +func (svr *Server) closeHandle(handle string) error { + svr.openFilesLock.Lock() + defer svr.openFilesLock.Unlock() + if f, ok := svr.openFiles[handle]; ok { + delete(svr.openFiles, handle) + return f.Close() + } + + return EBADF +} + +func (svr *Server) getHandle(handle string) (*os.File, bool) { + svr.openFilesLock.RLock() + defer svr.openFilesLock.RUnlock() + f, ok := svr.openFiles[handle] + return f, ok +} + +type serverRespondablePacket interface { + encoding.BinaryUnmarshaler + id() uint32 + respond(svr *Server) responsePacket +} + +// NewServer creates a new Server instance around the provided streams, serving +// content from the root of the filesystem. Optionally, ServerOption +// functions may be specified to further configure the Server. +// +// A subsequent call to Serve() is required to begin serving files over SFTP. 
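+//
+// A minimal usage sketch (editorial; the `channel` io.ReadWriteCloser from
+// the surrounding SSH daemon and the error handling are illustrative):
+//
+//	server, err := NewServer(channel, ReadOnly())
+//	if err != nil {
+//		return err
+//	}
+//	return server.Serve() // returns nil on a clean client disconnect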
+func NewServer(rwc io.ReadWriteCloser, options ...ServerOption) (*Server, error) {
+	svrConn := &serverConn{
+		conn: conn{
+			Reader:      rwc,
+			WriteCloser: rwc,
+		},
+	}
+	s := &Server{
+		serverConn:  svrConn,
+		debugStream: ioutil.Discard,
+		pktMgr:      newPktMgr(svrConn),
+		openFiles:   make(map[string]*os.File),
+	}
+
+	for _, o := range options {
+		if err := o(s); err != nil {
+			return nil, err
+		}
+	}
+
+	return s, nil
+}
+
+// A ServerOption is a function which applies configuration to a Server.
+type ServerOption func(*Server) error
+
+// WithDebug enables Server debugging output to the supplied io.Writer.
+func WithDebug(w io.Writer) ServerOption {
+	return func(s *Server) error {
+		s.debugStream = w
+		return nil
+	}
+}
+
+// ReadOnly configures a Server to serve files in read-only mode.
+func ReadOnly() ServerOption {
+	return func(s *Server) error {
+		s.readOnly = true
+		return nil
+	}
+}
+
+// WithAllocator enables the allocator.
+// After processing a packet, the allocated slices are kept in memory
+// and reused for new packets.
+// The allocator is experimental.
+func WithAllocator() ServerOption {
+	return func(s *Server) error {
+		alloc := newAllocator()
+		s.pktMgr.alloc = alloc
+		s.conn.alloc = alloc
+		return nil
+	}
+}
+
+// WithServerWorkingDirectory sets a working directory to use as base
+// for relative paths.
+// If unset, the default is the current working directory (os.Getwd).
+func WithServerWorkingDirectory(workDir string) ServerOption {
+	return func(s *Server) error {
+		s.workDir = cleanPath(workDir)
+		return nil
+	}
+}
+
+type rxPacket struct {
+	pktType  fxp
+	pktBytes []byte
+}
+
+// sftpServerWorker processes packets; up to SftpServerWorkerCount run in parallel.
+func (svr *Server) sftpServerWorker(pktChan chan orderedRequest) error {
+	for pkt := range pktChan {
+		// readonly checks
+		readonly := true
+		switch pkt := pkt.requestPacket.(type) {
+		case notReadOnly:
+			readonly = false
+		case *sshFxpOpenPacket:
+			readonly = pkt.readonly()
+		case *sshFxpExtendedPacket:
+			readonly = pkt.readonly()
+		}
+
+		// If server is operating read-only and a write operation is requested,
+		// return permission denied
+		if !readonly && svr.readOnly {
+			svr.pktMgr.readyPacket(
+				svr.pktMgr.newOrderedResponse(statusFromError(pkt.id(), syscall.EPERM), pkt.orderID()),
+			)
+			continue
+		}
+
+		if err := handlePacket(svr, pkt); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func handlePacket(s *Server, p orderedRequest) error {
+	var rpkt responsePacket
+	orderID := p.orderID()
+	switch p := p.requestPacket.(type) {
+	case *sshFxInitPacket:
+		rpkt = &sshFxVersionPacket{
+			Version:    sftpProtocolVersion,
+			Extensions: sftpExtensions,
+		}
+	case *sshFxpStatPacket:
+		// stat the requested file
+		info, err := os.Stat(s.toLocalPath(p.Path))
+		rpkt = &sshFxpStatResponse{
+			ID:   p.ID,
+			info: info,
+		}
+		if err != nil {
+			rpkt = statusFromError(p.ID, err)
+		}
+	case *sshFxpLstatPacket:
+		// stat the requested file
+		info, err := os.Lstat(s.toLocalPath(p.Path))
+		rpkt = &sshFxpStatResponse{
+			ID:   p.ID,
+			info: info,
+		}
+		if err != nil {
+			rpkt = statusFromError(p.ID, err)
+		}
+	case *sshFxpFstatPacket:
+		f, ok := s.getHandle(p.Handle)
+		var err error = EBADF
+		var info os.FileInfo
+		if ok {
+			info, err = f.Stat()
+			rpkt = &sshFxpStatResponse{
+				ID:   p.ID,
+				info: info,
+			}
+		}
+		if err != nil {
+			rpkt = statusFromError(p.ID, err)
+		}
+	case *sshFxpMkdirPacket:
+		// TODO FIXME: ignore flags field
+		err := os.Mkdir(s.toLocalPath(p.Path), 0o755)
+		rpkt = statusFromError(p.ID, err)
+	case *sshFxpRmdirPacket:
+		err := os.Remove(s.toLocalPath(p.Path))
+		rpkt =
statusFromError(p.ID, err) + case *sshFxpRemovePacket: + err := os.Remove(s.toLocalPath(p.Filename)) + rpkt = statusFromError(p.ID, err) + case *sshFxpRenamePacket: + err := os.Rename(s.toLocalPath(p.Oldpath), s.toLocalPath(p.Newpath)) + rpkt = statusFromError(p.ID, err) + case *sshFxpSymlinkPacket: + err := os.Symlink(s.toLocalPath(p.Targetpath), s.toLocalPath(p.Linkpath)) + rpkt = statusFromError(p.ID, err) + case *sshFxpClosePacket: + rpkt = statusFromError(p.ID, s.closeHandle(p.Handle)) + case *sshFxpReadlinkPacket: + f, err := os.Readlink(s.toLocalPath(p.Path)) + rpkt = &sshFxpNamePacket{ + ID: p.ID, + NameAttrs: []*sshFxpNameAttr{ + { + Name: f, + LongName: f, + Attrs: emptyFileStat, + }, + }, + } + if err != nil { + rpkt = statusFromError(p.ID, err) + } + case *sshFxpRealpathPacket: + f, err := filepath.Abs(s.toLocalPath(p.Path)) + f = cleanPath(f) + rpkt = &sshFxpNamePacket{ + ID: p.ID, + NameAttrs: []*sshFxpNameAttr{ + { + Name: f, + LongName: f, + Attrs: emptyFileStat, + }, + }, + } + if err != nil { + rpkt = statusFromError(p.ID, err) + } + case *sshFxpOpendirPacket: + lp := s.toLocalPath(p.Path) + + if stat, err := os.Stat(lp); err != nil { + rpkt = statusFromError(p.ID, err) + } else if !stat.IsDir() { + rpkt = statusFromError(p.ID, &os.PathError{ + Path: lp, Err: syscall.ENOTDIR, + }) + } else { + rpkt = (&sshFxpOpenPacket{ + ID: p.ID, + Path: p.Path, + Pflags: sshFxfRead, + }).respond(s) + } + case *sshFxpReadPacket: + var err error = EBADF + f, ok := s.getHandle(p.Handle) + if ok { + err = nil + data := p.getDataSlice(s.pktMgr.alloc, orderID) + n, _err := f.ReadAt(data, int64(p.Offset)) + if _err != nil && (_err != io.EOF || n == 0) { + err = _err + } + rpkt = &sshFxpDataPacket{ + ID: p.ID, + Length: uint32(n), + Data: data[:n], + // do not use data[:n:n] here to clamp the capacity, we allocated extra capacity above to avoid reallocations + } + } + if err != nil { + rpkt = statusFromError(p.ID, err) + } + + case *sshFxpWritePacket: + f, ok := s.getHandle(p.Handle) + var err error = EBADF + if ok { + _, err = f.WriteAt(p.Data, int64(p.Offset)) + } + rpkt = statusFromError(p.ID, err) + case *sshFxpExtendedPacket: + if p.SpecificPacket == nil { + rpkt = statusFromError(p.ID, ErrSSHFxOpUnsupported) + } else { + rpkt = p.respond(s) + } + case serverRespondablePacket: + rpkt = p.respond(s) + default: + return fmt.Errorf("unexpected packet type %T", p) + } + + s.pktMgr.readyPacket(s.pktMgr.newOrderedResponse(rpkt, orderID)) + return nil +} + +// Serve serves SFTP connections until the streams stop or the SFTP subsystem +// is stopped. It returns nil if the server exits cleanly. +func (svr *Server) Serve() error { + defer func() { + if svr.pktMgr.alloc != nil { + svr.pktMgr.alloc.Free() + } + }() + var wg sync.WaitGroup + runWorker := func(ch chan orderedRequest) { + wg.Add(1) + go func() { + defer wg.Done() + if err := svr.sftpServerWorker(ch); err != nil { + svr.conn.Close() // shuts down recvPacket + } + }() + } + pktChan := svr.pktMgr.workerChan(runWorker) + + var err error + var pkt requestPacket + var pktType uint8 + var pktBytes []byte + for { + pktType, pktBytes, err = svr.serverConn.recvPacket(svr.pktMgr.getNextOrderID()) + if err != nil { + // Check whether the connection terminated cleanly in-between packets. 
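+			// (Editorial assumption, not upstream commentary: a clean close
+			// surfaces as io.EOF only at a packet boundary; a connection that
+			// drops mid-packet is reported as io.ErrUnexpectedEOF and is
+			// returned to the caller below.)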
+ if err == io.EOF { + err = nil + } + // we don't care about releasing allocated pages here, the server will quit and the allocator freed + break + } + + pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes}) + if err != nil { + switch { + case errors.Is(err, errUnknownExtendedPacket): + //if err := svr.serverConn.sendError(pkt, ErrSshFxOpUnsupported); err != nil { + // debug("failed to send err packet: %v", err) + // svr.conn.Close() // shuts down recvPacket + // break + //} + default: + debug("makePacket err: %v", err) + svr.conn.Close() // shuts down recvPacket + break + } + } + + pktChan <- svr.pktMgr.newOrderedRequest(pkt) + } + + close(pktChan) // shuts down sftpServerWorkers + wg.Wait() // wait for all workers to exit + + // close any still-open files + for handle, file := range svr.openFiles { + fmt.Fprintf(svr.debugStream, "sftp server file with handle %q left open: %v\n", handle, file.Name()) + file.Close() + } + return err // error from recvPacket +} + +type ider interface { + id() uint32 +} + +// The init packet has no ID, so we just return a zero-value ID +func (p *sshFxInitPacket) id() uint32 { return 0 } + +type sshFxpStatResponse struct { + ID uint32 + info os.FileInfo +} + +func (p *sshFxpStatResponse) marshalPacket() ([]byte, []byte, error) { + l := 4 + 1 + 4 // uint32(length) + byte(type) + uint32(id) + + b := make([]byte, 4, l) + b = append(b, sshFxpAttrs) + b = marshalUint32(b, p.ID) + + var payload []byte + payload = marshalFileInfo(payload, p.info) + + return b, payload, nil +} + +func (p *sshFxpStatResponse) MarshalBinary() ([]byte, error) { + header, payload, err := p.marshalPacket() + return append(header, payload...), err +} + +var emptyFileStat = []interface{}{uint32(0)} + +func (p *sshFxpOpenPacket) readonly() bool { + return !p.hasPflags(sshFxfWrite) +} + +func (p *sshFxpOpenPacket) hasPflags(flags ...uint32) bool { + for _, f := range flags { + if p.Pflags&f == 0 { + return false + } + } + return true +} + +func (p *sshFxpOpenPacket) respond(svr *Server) responsePacket { + var osFlags int + if p.hasPflags(sshFxfRead, sshFxfWrite) { + osFlags |= os.O_RDWR + } else if p.hasPflags(sshFxfWrite) { + osFlags |= os.O_WRONLY + } else if p.hasPflags(sshFxfRead) { + osFlags |= os.O_RDONLY + } else { + // how are they opening? + return statusFromError(p.ID, syscall.EINVAL) + } + + // Don't use O_APPEND flag as it conflicts with WriteAt. + // The sshFxfAppend flag is a no-op here as the client sends the offsets. 
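+	//
+	// Illustrative aside (editorial, not upstream code): Go itself rejects
+	// the combination, so honoring sshFxfAppend here would break every write:
+	//
+	//	f, _ := os.OpenFile(name, os.O_APPEND|os.O_WRONLY, 0o644)
+	//	_, err := f.WriteAt(data, 0)
+	//	// err: "invalid use of WriteAt on file opened with O_APPEND"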
+ + if p.hasPflags(sshFxfCreat) { + osFlags |= os.O_CREATE + } + if p.hasPflags(sshFxfTrunc) { + osFlags |= os.O_TRUNC + } + if p.hasPflags(sshFxfExcl) { + osFlags |= os.O_EXCL + } + + f, err := os.OpenFile(svr.toLocalPath(p.Path), osFlags, 0o644) + if err != nil { + return statusFromError(p.ID, err) + } + + handle := svr.nextHandle(f) + return &sshFxpHandlePacket{ID: p.ID, Handle: handle} +} + +func (p *sshFxpReaddirPacket) respond(svr *Server) responsePacket { + f, ok := svr.getHandle(p.Handle) + if !ok { + return statusFromError(p.ID, EBADF) + } + + dirents, err := f.Readdir(128) + if err != nil { + return statusFromError(p.ID, err) + } + + idLookup := osIDLookup{} + + ret := &sshFxpNamePacket{ID: p.ID} + for _, dirent := range dirents { + ret.NameAttrs = append(ret.NameAttrs, &sshFxpNameAttr{ + Name: dirent.Name(), + LongName: runLs(idLookup, dirent), + Attrs: []interface{}{dirent}, + }) + } + return ret +} + +func (p *sshFxpSetstatPacket) respond(svr *Server) responsePacket { + // additional unmarshalling is required for each possibility here + b := p.Attrs.([]byte) + var err error + + p.Path = svr.toLocalPath(p.Path) + + debug("setstat name \"%s\"", p.Path) + if (p.Flags & sshFileXferAttrSize) != 0 { + var size uint64 + if size, b, err = unmarshalUint64Safe(b); err == nil { + err = os.Truncate(p.Path, int64(size)) + } + } + if (p.Flags & sshFileXferAttrPermissions) != 0 { + var mode uint32 + if mode, b, err = unmarshalUint32Safe(b); err == nil { + err = os.Chmod(p.Path, os.FileMode(mode)) + } + } + if (p.Flags & sshFileXferAttrACmodTime) != 0 { + var atime uint32 + var mtime uint32 + if atime, b, err = unmarshalUint32Safe(b); err != nil { + } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { + } else { + atimeT := time.Unix(int64(atime), 0) + mtimeT := time.Unix(int64(mtime), 0) + err = os.Chtimes(p.Path, atimeT, mtimeT) + } + } + if (p.Flags & sshFileXferAttrUIDGID) != 0 { + var uid uint32 + var gid uint32 + if uid, b, err = unmarshalUint32Safe(b); err != nil { + } else if gid, _, err = unmarshalUint32Safe(b); err != nil { + } else { + err = os.Chown(p.Path, int(uid), int(gid)) + } + } + + return statusFromError(p.ID, err) +} + +func (p *sshFxpFsetstatPacket) respond(svr *Server) responsePacket { + f, ok := svr.getHandle(p.Handle) + if !ok { + return statusFromError(p.ID, EBADF) + } + + // additional unmarshalling is required for each possibility here + b := p.Attrs.([]byte) + var err error + + debug("fsetstat name \"%s\"", f.Name()) + if (p.Flags & sshFileXferAttrSize) != 0 { + var size uint64 + if size, b, err = unmarshalUint64Safe(b); err == nil { + err = f.Truncate(int64(size)) + } + } + if (p.Flags & sshFileXferAttrPermissions) != 0 { + var mode uint32 + if mode, b, err = unmarshalUint32Safe(b); err == nil { + err = f.Chmod(os.FileMode(mode)) + } + } + if (p.Flags & sshFileXferAttrACmodTime) != 0 { + var atime uint32 + var mtime uint32 + if atime, b, err = unmarshalUint32Safe(b); err != nil { + } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { + } else { + atimeT := time.Unix(int64(atime), 0) + mtimeT := time.Unix(int64(mtime), 0) + err = os.Chtimes(f.Name(), atimeT, mtimeT) + } + } + if (p.Flags & sshFileXferAttrUIDGID) != 0 { + var uid uint32 + var gid uint32 + if uid, b, err = unmarshalUint32Safe(b); err != nil { + } else if gid, _, err = unmarshalUint32Safe(b); err != nil { + } else { + err = f.Chown(int(uid), int(gid)) + } + } + + return statusFromError(p.ID, err) +} + +func statusFromError(id uint32, err error) *sshFxpStatusPacket { + ret := 
&sshFxpStatusPacket{ + ID: id, + StatusError: StatusError{ + // sshFXOk = 0 + // sshFXEOF = 1 + // sshFXNoSuchFile = 2 ENOENT + // sshFXPermissionDenied = 3 + // sshFXFailure = 4 + // sshFXBadMessage = 5 + // sshFXNoConnection = 6 + // sshFXConnectionLost = 7 + // sshFXOPUnsupported = 8 + Code: sshFxOk, + }, + } + if err == nil { + return ret + } + + debug("statusFromError: error is %T %#v", err, err) + ret.StatusError.Code = sshFxFailure + ret.StatusError.msg = err.Error() + + if os.IsNotExist(err) { + ret.StatusError.Code = sshFxNoSuchFile + return ret + } + if code, ok := translateSyscallError(err); ok { + ret.StatusError.Code = code + return ret + } + + if errors.Is(err, io.EOF) { + ret.StatusError.Code = sshFxEOF + return ret + } + + var e fxerr + if errors.As(err, &e) { + ret.StatusError.Code = uint32(e) + return ret + } + + return ret +} diff --git a/vendor/github.com/pkg/sftp/server_plan9.go b/vendor/github.com/pkg/sftp/server_plan9.go new file mode 100644 index 000000000..4e8ed0678 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_plan9.go @@ -0,0 +1,27 @@ +package sftp + +import ( + "path" + "path/filepath" +) + +func (s *Server) toLocalPath(p string) string { + if s.workDir != "" && !path.IsAbs(p) { + p = path.Join(s.workDir, p) + } + + lp := filepath.FromSlash(p) + + if path.IsAbs(p) { + tmp := lp[1:] + + if filepath.IsAbs(tmp) { + // If the FromSlash without any starting slashes is absolute, + // then we have a filepath encoded with a prefix '/'. + // e.g. "/#s/boot" to "#s/boot" + return tmp + } + } + + return lp +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_darwin.go b/vendor/github.com/pkg/sftp/server_statvfs_darwin.go new file mode 100644 index 000000000..8c01dac52 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_darwin.go @@ -0,0 +1,21 @@ +package sftp + +import ( + "syscall" +) + +func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) { + return &StatVFS{ + Bsize: uint64(stat.Bsize), + Frsize: uint64(stat.Bsize), // fragment size is a linux thing; use block size here + Blocks: stat.Blocks, + Bfree: stat.Bfree, + Bavail: stat.Bavail, + Files: stat.Files, + Ffree: stat.Ffree, + Favail: stat.Ffree, // not sure how to calculate Favail + Fsid: uint64(uint64(stat.Fsid.Val[1])<<32 | uint64(stat.Fsid.Val[0])), // endianness? + Flag: uint64(stat.Flags), // assuming POSIX? 
+ Namemax: 1024, // man 2 statfs shows: #define MAXPATHLEN 1024 + }, nil +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_impl.go b/vendor/github.com/pkg/sftp/server_statvfs_impl.go new file mode 100644 index 000000000..a5470798c --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_impl.go @@ -0,0 +1,30 @@ +//go:build darwin || linux +// +build darwin linux + +// fill in statvfs structure with OS specific values +// Statfs_t is different per-kernel, and only exists on some unixes (not Solaris for instance) + +package sftp + +import ( + "syscall" +) + +func (p *sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket { + retPkt, err := getStatVFSForPath(p.Path) + if err != nil { + return statusFromError(p.ID, err) + } + retPkt.ID = p.ID + + return retPkt +} + +func getStatVFSForPath(name string) (*StatVFS, error) { + var stat syscall.Statfs_t + if err := syscall.Statfs(name, &stat); err != nil { + return nil, err + } + + return statvfsFromStatfst(&stat) +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_linux.go b/vendor/github.com/pkg/sftp/server_statvfs_linux.go new file mode 100644 index 000000000..615c4157a --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_linux.go @@ -0,0 +1,23 @@ +//go:build linux +// +build linux + +package sftp + +import ( + "syscall" +) + +func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) { + return &StatVFS{ + Bsize: uint64(stat.Bsize), + Frsize: uint64(stat.Frsize), + Blocks: stat.Blocks, + Bfree: stat.Bfree, + Bavail: stat.Bavail, + Files: stat.Files, + Ffree: stat.Ffree, + Favail: stat.Ffree, // not sure how to calculate Favail + Flag: uint64(stat.Flags), // assuming POSIX? + Namemax: uint64(stat.Namelen), + }, nil +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_plan9.go b/vendor/github.com/pkg/sftp/server_statvfs_plan9.go new file mode 100644 index 000000000..e71a27d37 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_plan9.go @@ -0,0 +1,13 @@ +package sftp + +import ( + "syscall" +) + +func (p *sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket { + return statusFromError(p.ID, syscall.EPLAN9) +} + +func getStatVFSForPath(name string) (*StatVFS, error) { + return nil, syscall.EPLAN9 +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go new file mode 100644 index 000000000..dd4705bb4 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go @@ -0,0 +1,16 @@ +//go:build !darwin && !linux && !plan9 +// +build !darwin,!linux,!plan9 + +package sftp + +import ( + "syscall" +) + +func (p *sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket { + return statusFromError(p.ID, syscall.ENOTSUP) +} + +func getStatVFSForPath(name string) (*StatVFS, error) { + return nil, syscall.ENOTSUP +} diff --git a/vendor/github.com/pkg/sftp/server_unix.go b/vendor/github.com/pkg/sftp/server_unix.go new file mode 100644 index 000000000..495b397c0 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_unix.go @@ -0,0 +1,16 @@ +//go:build !windows && !plan9 +// +build !windows,!plan9 + +package sftp + +import ( + "path" +) + +func (s *Server) toLocalPath(p string) string { + if s.workDir != "" && !path.IsAbs(p) { + p = path.Join(s.workDir, p) + } + + return p +} diff --git a/vendor/github.com/pkg/sftp/server_windows.go b/vendor/github.com/pkg/sftp/server_windows.go new file mode 100644 index 000000000..b35be7305 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_windows.go @@ -0,0 +1,39 @@ +package 
sftp + +import ( + "path" + "path/filepath" +) + +func (s *Server) toLocalPath(p string) string { + if s.workDir != "" && !path.IsAbs(p) { + p = path.Join(s.workDir, p) + } + + lp := filepath.FromSlash(p) + + if path.IsAbs(p) { + tmp := lp + for len(tmp) > 0 && tmp[0] == '\\' { + tmp = tmp[1:] + } + + if filepath.IsAbs(tmp) { + // If the FromSlash without any starting slashes is absolute, + // then we have a filepath encoded with a prefix '/'. + // e.g. "/C:/Windows" to "C:\\Windows" + return tmp + } + + tmp += "\\" + + if filepath.IsAbs(tmp) { + // If the FromSlash without any starting slashes but with extra end slash is absolute, + // then we have a filepath encoded with a prefix '/' and a dropped '/' at the end. + // e.g. "/C:" to "C:\\" + return tmp + } + } + + return lp +} diff --git a/vendor/github.com/pkg/sftp/sftp.go b/vendor/github.com/pkg/sftp/sftp.go new file mode 100644 index 000000000..778c8f3d7 --- /dev/null +++ b/vendor/github.com/pkg/sftp/sftp.go @@ -0,0 +1,258 @@ +// Package sftp implements the SSH File Transfer Protocol as described in +// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt +package sftp + +import ( + "fmt" +) + +const ( + sshFxpInit = 1 + sshFxpVersion = 2 + sshFxpOpen = 3 + sshFxpClose = 4 + sshFxpRead = 5 + sshFxpWrite = 6 + sshFxpLstat = 7 + sshFxpFstat = 8 + sshFxpSetstat = 9 + sshFxpFsetstat = 10 + sshFxpOpendir = 11 + sshFxpReaddir = 12 + sshFxpRemove = 13 + sshFxpMkdir = 14 + sshFxpRmdir = 15 + sshFxpRealpath = 16 + sshFxpStat = 17 + sshFxpRename = 18 + sshFxpReadlink = 19 + sshFxpSymlink = 20 + sshFxpStatus = 101 + sshFxpHandle = 102 + sshFxpData = 103 + sshFxpName = 104 + sshFxpAttrs = 105 + sshFxpExtended = 200 + sshFxpExtendedReply = 201 +) + +const ( + sshFxOk = 0 + sshFxEOF = 1 + sshFxNoSuchFile = 2 + sshFxPermissionDenied = 3 + sshFxFailure = 4 + sshFxBadMessage = 5 + sshFxNoConnection = 6 + sshFxConnectionLost = 7 + sshFxOPUnsupported = 8 + + // see draft-ietf-secsh-filexfer-13 + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-9.1 + sshFxInvalidHandle = 9 + sshFxNoSuchPath = 10 + sshFxFileAlreadyExists = 11 + sshFxWriteProtect = 12 + sshFxNoMedia = 13 + sshFxNoSpaceOnFilesystem = 14 + sshFxQuotaExceeded = 15 + sshFxUnknownPrincipal = 16 + sshFxLockConflict = 17 + sshFxDirNotEmpty = 18 + sshFxNotADirectory = 19 + sshFxInvalidFilename = 20 + sshFxLinkLoop = 21 + sshFxCannotDelete = 22 + sshFxInvalidParameter = 23 + sshFxFileIsADirectory = 24 + sshFxByteRangeLockConflict = 25 + sshFxByteRangeLockRefused = 26 + sshFxDeletePending = 27 + sshFxFileCorrupt = 28 + sshFxOwnerInvalid = 29 + sshFxGroupInvalid = 30 + sshFxNoMatchingByteRangeLock = 31 +) + +const ( + sshFxfRead = 0x00000001 + sshFxfWrite = 0x00000002 + sshFxfAppend = 0x00000004 + sshFxfCreat = 0x00000008 + sshFxfTrunc = 0x00000010 + sshFxfExcl = 0x00000020 +) + +var ( + // supportedSFTPExtensions defines the supported extensions + supportedSFTPExtensions = []sshExtensionPair{ + {"hardlink@openssh.com", "1"}, + {"posix-rename@openssh.com", "1"}, + {"statvfs@openssh.com", "2"}, + } + sftpExtensions = supportedSFTPExtensions +) + +type fxp uint8 + +func (f fxp) String() string { + switch f { + case sshFxpInit: + return "SSH_FXP_INIT" + case sshFxpVersion: + return "SSH_FXP_VERSION" + case sshFxpOpen: + return "SSH_FXP_OPEN" + case sshFxpClose: + return "SSH_FXP_CLOSE" + case sshFxpRead: + return "SSH_FXP_READ" + case sshFxpWrite: + return "SSH_FXP_WRITE" + case sshFxpLstat: + return "SSH_FXP_LSTAT" + case sshFxpFstat: + return "SSH_FXP_FSTAT" + case 
sshFxpSetstat:
+		return "SSH_FXP_SETSTAT"
+	case sshFxpFsetstat:
+		return "SSH_FXP_FSETSTAT"
+	case sshFxpOpendir:
+		return "SSH_FXP_OPENDIR"
+	case sshFxpReaddir:
+		return "SSH_FXP_READDIR"
+	case sshFxpRemove:
+		return "SSH_FXP_REMOVE"
+	case sshFxpMkdir:
+		return "SSH_FXP_MKDIR"
+	case sshFxpRmdir:
+		return "SSH_FXP_RMDIR"
+	case sshFxpRealpath:
+		return "SSH_FXP_REALPATH"
+	case sshFxpStat:
+		return "SSH_FXP_STAT"
+	case sshFxpRename:
+		return "SSH_FXP_RENAME"
+	case sshFxpReadlink:
+		return "SSH_FXP_READLINK"
+	case sshFxpSymlink:
+		return "SSH_FXP_SYMLINK"
+	case sshFxpStatus:
+		return "SSH_FXP_STATUS"
+	case sshFxpHandle:
+		return "SSH_FXP_HANDLE"
+	case sshFxpData:
+		return "SSH_FXP_DATA"
+	case sshFxpName:
+		return "SSH_FXP_NAME"
+	case sshFxpAttrs:
+		return "SSH_FXP_ATTRS"
+	case sshFxpExtended:
+		return "SSH_FXP_EXTENDED"
+	case sshFxpExtendedReply:
+		return "SSH_FXP_EXTENDED_REPLY"
+	default:
+		return "unknown"
+	}
+}
+
+type fx uint8
+
+func (f fx) String() string {
+	switch f {
+	case sshFxOk:
+		return "SSH_FX_OK"
+	case sshFxEOF:
+		return "SSH_FX_EOF"
+	case sshFxNoSuchFile:
+		return "SSH_FX_NO_SUCH_FILE"
+	case sshFxPermissionDenied:
+		return "SSH_FX_PERMISSION_DENIED"
+	case sshFxFailure:
+		return "SSH_FX_FAILURE"
+	case sshFxBadMessage:
+		return "SSH_FX_BAD_MESSAGE"
+	case sshFxNoConnection:
+		return "SSH_FX_NO_CONNECTION"
+	case sshFxConnectionLost:
+		return "SSH_FX_CONNECTION_LOST"
+	case sshFxOPUnsupported:
+		return "SSH_FX_OP_UNSUPPORTED"
+	default:
+		return "unknown"
+	}
+}
+
+type unexpectedPacketErr struct {
+	want, got uint8
+}
+
+func (u *unexpectedPacketErr) Error() string {
+	return fmt.Sprintf("sftp: unexpected packet: want %v, got %v", fxp(u.want), fxp(u.got))
+}
+
+func unimplementedPacketErr(u uint8) error {
+	return fmt.Errorf("sftp: unimplemented packet type: got %v", fxp(u))
+}
+
+type unexpectedIDErr struct{ want, got uint32 }
+
+func (u *unexpectedIDErr) Error() string {
+	return fmt.Sprintf("sftp: unexpected id: want %d, got %d", u.want, u.got)
+}
+
+func unimplementedSeekWhence(whence int) error {
+	return fmt.Errorf("sftp: unimplemented seek whence %d", whence)
+}
+
+func unexpectedCount(want, got uint32) error {
+	return fmt.Errorf("sftp: unexpected count: want %d, got %d", want, got)
+}
+
+type unexpectedVersionErr struct{ want, got uint32 }
+
+func (u *unexpectedVersionErr) Error() string {
+	return fmt.Sprintf("sftp: unexpected server version: want %v, got %v", u.want, u.got)
+}
+
+// A StatusError is returned when an SFTP operation fails, and provides
+// additional information about the failure.
+type StatusError struct {
+	Code      uint32
+	msg, lang string
+}
+
+func (s *StatusError) Error() string {
+	return fmt.Sprintf("sftp: %q (%v)", s.msg, fx(s.Code))
+}
+
+// FxCode returns the error code typed to match against the exported codes
+func (s *StatusError) FxCode() fxerr {
+	return fxerr(s.Code)
+}
+
+func getSupportedExtensionByName(extensionName string) (sshExtensionPair, error) {
+	for _, supportedExtension := range supportedSFTPExtensions {
+		if supportedExtension.Name == extensionName {
+			return supportedExtension, nil
+		}
+	}
+	return sshExtensionPair{}, fmt.Errorf("unsupported extension: %s", extensionName)
+}
+
+// SetSFTPExtensions allows customizing the supported server extensions.
+// See the variable supportedSFTPExtensions for supported extensions.
+// This method accepts extension names, for example 'hardlink@openssh.com'.
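+// A hedged usage sketch (the chosen extension is just an example):
+//
+//	if err := SetSFTPExtensions("posix-rename@openssh.com"); err != nil {
+//		log.Fatal(err)
+//	}
+//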
+// If an invalid extension is given, an error will be returned and nothing will be changed.
+func SetSFTPExtensions(extensions ...string) error {
+	tempExtensions := []sshExtensionPair{}
+	for _, extension := range extensions {
+		sftpExtension, err := getSupportedExtensionByName(extension)
+		if err != nil {
+			return err
+		}
+		tempExtensions = append(tempExtensions, sftpExtension)
+	}
+	sftpExtensions = tempExtensions
+	return nil
+}
diff --git a/vendor/github.com/pkg/sftp/stat_plan9.go b/vendor/github.com/pkg/sftp/stat_plan9.go
new file mode 100644
index 000000000..761abdf56
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/stat_plan9.go
@@ -0,0 +1,103 @@
+package sftp
+
+import (
+	"os"
+	"syscall"
+)
+
+var EBADF = syscall.NewError("fd out of range or not open")
+
+func wrapPathError(filepath string, err error) error {
+	if errno, ok := err.(syscall.ErrorString); ok {
+		return &os.PathError{Path: filepath, Err: errno}
+	}
+	return err
+}
+
+// translateErrno translates a syscall error number to an SFTP error code.
+func translateErrno(errno syscall.ErrorString) uint32 {
+	switch errno {
+	case "":
+		return sshFxOk
+	case syscall.ENOENT:
+		return sshFxNoSuchFile
+	case syscall.EPERM:
+		return sshFxPermissionDenied
+	}
+
+	return sshFxFailure
+}
+
+func translateSyscallError(err error) (uint32, bool) {
+	switch e := err.(type) {
+	case syscall.ErrorString:
+		return translateErrno(e), true
+	case *os.PathError:
+		debug("statusFromError,pathError: error is %T %#v", e.Err, e.Err)
+		if errno, ok := e.Err.(syscall.ErrorString); ok {
+			return translateErrno(errno), true
+		}
+	}
+	return 0, false
+}
+
+// isRegular returns true if the mode describes a regular file.
+func isRegular(mode uint32) bool {
+	return mode&S_IFMT == syscall.S_IFREG
+}
+
+// toFileMode converts sftp filemode bits to the os.FileMode specification
+func toFileMode(mode uint32) os.FileMode {
+	var fm = os.FileMode(mode & 0777)
+
+	switch mode & S_IFMT {
+	case syscall.S_IFBLK:
+		fm |= os.ModeDevice
+	case syscall.S_IFCHR:
+		fm |= os.ModeDevice | os.ModeCharDevice
+	case syscall.S_IFDIR:
+		fm |= os.ModeDir
+	case syscall.S_IFIFO:
+		fm |= os.ModeNamedPipe
+	case syscall.S_IFLNK:
+		fm |= os.ModeSymlink
+	case syscall.S_IFREG:
+		// nothing to do
+	case syscall.S_IFSOCK:
+		fm |= os.ModeSocket
+	}
+
+	return fm
+}
+
+// fromFileMode converts from the os.FileMode specification to sftp filemode bits
+func fromFileMode(mode os.FileMode) uint32 {
+	ret := uint32(mode & os.ModePerm)
+
+	switch mode & os.ModeType {
+	case os.ModeDevice | os.ModeCharDevice:
+		ret |= syscall.S_IFCHR
+	case os.ModeDevice:
+		ret |= syscall.S_IFBLK
+	case os.ModeDir:
+		ret |= syscall.S_IFDIR
+	case os.ModeNamedPipe:
+		ret |= syscall.S_IFIFO
+	case os.ModeSymlink:
+		ret |= syscall.S_IFLNK
+	case 0:
+		ret |= syscall.S_IFREG
+	case os.ModeSocket:
+		ret |= syscall.S_IFSOCK
+	}
+
+	return ret
+}
+
+// Plan 9 doesn't have setuid, setgid or sticky, but a Plan 9 client should
+// be able to send these bits to a POSIX server.
+const (
+	s_ISUID = 04000
+	s_ISGID = 02000
+	s_ISVTX = 01000
+)
diff --git a/vendor/github.com/pkg/sftp/stat_posix.go b/vendor/github.com/pkg/sftp/stat_posix.go
new file mode 100644
index 000000000..5b870e23c
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/stat_posix.go
@@ -0,0 +1,124 @@
+//go:build !plan9
+// +build !plan9
+
+package sftp
+
+import (
+	"os"
+	"syscall"
+)
+
+const EBADF = syscall.EBADF
+
+func wrapPathError(filepath string, err error) error {
+	if errno, ok := err.(syscall.Errno); ok {
+		return &os.PathError{Path: filepath, Err: errno}
+	}
+	return err
+}
+
+// translateErrno translates a syscall error number to an SFTP error code.
+func translateErrno(errno syscall.Errno) uint32 {
+	switch errno {
+	case 0:
+		return sshFxOk
+	case syscall.ENOENT:
+		return sshFxNoSuchFile
+	case syscall.EACCES, syscall.EPERM:
+		return sshFxPermissionDenied
+	}
+
+	return sshFxFailure
+}
+
+func translateSyscallError(err error) (uint32, bool) {
+	switch e := err.(type) {
+	case syscall.Errno:
+		return translateErrno(e), true
+	case *os.PathError:
+		debug("statusFromError,pathError: error is %T %#v", e.Err, e.Err)
+		if errno, ok := e.Err.(syscall.Errno); ok {
+			return translateErrno(errno), true
+		}
+	}
+	return 0, false
+}
+
+// isRegular returns true if the mode describes a regular file.
+func isRegular(mode uint32) bool {
+	return mode&S_IFMT == syscall.S_IFREG
+}
+
+// toFileMode converts sftp filemode bits to the os.FileMode specification
+func toFileMode(mode uint32) os.FileMode {
+	var fm = os.FileMode(mode & 0777)
+
+	switch mode & S_IFMT {
+	case syscall.S_IFBLK:
+		fm |= os.ModeDevice
+	case syscall.S_IFCHR:
+		fm |= os.ModeDevice | os.ModeCharDevice
+	case syscall.S_IFDIR:
+		fm |= os.ModeDir
+	case syscall.S_IFIFO:
+		fm |= os.ModeNamedPipe
+	case syscall.S_IFLNK:
+		fm |= os.ModeSymlink
+	case syscall.S_IFREG:
+		// nothing to do
+	case syscall.S_IFSOCK:
+		fm |= os.ModeSocket
+	}
+
+	if mode&syscall.S_ISUID != 0 {
+		fm |= os.ModeSetuid
+	}
+	if mode&syscall.S_ISGID != 0 {
+		fm |= os.ModeSetgid
+	}
+	if mode&syscall.S_ISVTX != 0 {
+		fm |= os.ModeSticky
+	}
+
+	return fm
+}
+
+// fromFileMode converts from the os.FileMode specification to sftp filemode bits
+func fromFileMode(mode os.FileMode) uint32 {
+	ret := uint32(mode & os.ModePerm)
+
+	switch mode & os.ModeType {
+	case os.ModeDevice | os.ModeCharDevice:
+		ret |= syscall.S_IFCHR
+	case os.ModeDevice:
+		ret |= syscall.S_IFBLK
+	case os.ModeDir:
+		ret |= syscall.S_IFDIR
+	case os.ModeNamedPipe:
+		ret |= syscall.S_IFIFO
+	case os.ModeSymlink:
+		ret |= syscall.S_IFLNK
+	case 0:
+		ret |= syscall.S_IFREG
+	case os.ModeSocket:
+		ret |= syscall.S_IFSOCK
+	}
+
+	if mode&os.ModeSetuid != 0 {
+		ret |= syscall.S_ISUID
+	}
+	if mode&os.ModeSetgid != 0 {
+		ret |= syscall.S_ISGID
+	}
+	if mode&os.ModeSticky != 0 {
+		ret |= syscall.S_ISVTX
+	}
+
+	return ret
+}
+
+const (
+	s_ISUID = syscall.S_ISUID
+	s_ISGID = syscall.S_ISGID
+	s_ISVTX = syscall.S_ISVTX
+)
diff --git a/vendor/github.com/pkg/sftp/syscall_fixed.go b/vendor/github.com/pkg/sftp/syscall_fixed.go
new file mode 100644
index 000000000..e84430830
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/syscall_fixed.go
@@ -0,0 +1,10 @@
+//go:build plan9 || windows || (js && wasm)
+// +build plan9 windows js,wasm
+
+// Go defines S_IFMT on windows, plan9 and js/wasm as 0x1f000 instead of
+// 0xf000. None of the other S_IFxyz values include the "1" (in 0x1f000)
+// which prevents them from matching the bitmask.
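+//
+// Worked example (editorial, using this package's values): a POSIX server
+// encodes a symlink as 0xa1ff (S_IFLNK|0777); masking with the portable
+// 0xf000 yields 0xa000 == S_IFLNK on every platform, which is all the
+// mode checks in this package need.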
+ +package sftp + +const S_IFMT = 0xf000 diff --git a/vendor/github.com/pkg/sftp/syscall_good.go b/vendor/github.com/pkg/sftp/syscall_good.go new file mode 100644 index 000000000..50052189e --- /dev/null +++ b/vendor/github.com/pkg/sftp/syscall_good.go @@ -0,0 +1,10 @@ +//go:build !plan9 && !windows && (!js || !wasm) +// +build !plan9 +// +build !windows +// +build !js !wasm + +package sftp + +import "syscall" + +const S_IFMT = syscall.S_IFMT diff --git a/vendor/github.com/stretchr/objx/.codeclimate.yml b/vendor/github.com/stretchr/objx/.codeclimate.yml new file mode 100644 index 000000000..559fa399c --- /dev/null +++ b/vendor/github.com/stretchr/objx/.codeclimate.yml @@ -0,0 +1,21 @@ +engines: + gofmt: + enabled: true + golint: + enabled: true + govet: + enabled: true + +exclude_patterns: +- ".github/" +- "vendor/" +- "codegen/" +- "*.yml" +- ".*.yml" +- "*.md" +- "Gopkg.*" +- "doc.go" +- "type_specific_codegen_test.go" +- "type_specific_codegen.go" +- ".gitignore" +- "LICENSE" diff --git a/vendor/github.com/stretchr/objx/.gitignore b/vendor/github.com/stretchr/objx/.gitignore new file mode 100644 index 000000000..ea58090bd --- /dev/null +++ b/vendor/github.com/stretchr/objx/.gitignore @@ -0,0 +1,11 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/stretchr/objx/LICENSE b/vendor/github.com/stretchr/objx/LICENSE new file mode 100644 index 000000000..44d4d9d5a --- /dev/null +++ b/vendor/github.com/stretchr/objx/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Stretchr, Inc. +Copyright (c) 2017-2018 objx contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/stretchr/objx/README.md
new file mode 100644
index 000000000..78dc1f8b0
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/README.md
@@ -0,0 +1,80 @@
+# Objx
+[![Build Status](https://travis-ci.org/stretchr/objx.svg?branch=master)](https://travis-ci.org/stretchr/objx)
+[![Go Report Card](https://goreportcard.com/badge/github.com/stretchr/objx)](https://goreportcard.com/report/github.com/stretchr/objx)
+[![Maintainability](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/maintainability)](https://codeclimate.com/github/stretchr/objx/maintainability)
+[![Test Coverage](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/test_coverage)](https://codeclimate.com/github/stretchr/objx/test_coverage)
+[![Sourcegraph](https://sourcegraph.com/github.com/stretchr/objx/-/badge.svg)](https://sourcegraph.com/github.com/stretchr/objx)
+[![GoDoc](https://pkg.go.dev/badge/github.com/stretchr/objx?utm_source=godoc)](https://pkg.go.dev/github.com/stretchr/objx)
+
+Objx - Go package for dealing with maps, slices, JSON and other data.
+
+Get started:
+
+- Install Objx with [one line of code](#installation), or [update it with another](#staying-up-to-date)
+- Check out the API Documentation http://pkg.go.dev/github.com/stretchr/objx
+
+## Overview
+Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc.
+
+### Pattern
+Objx uses a predictable pattern to make accessing data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going:
+
+    m, err := objx.FromJSON(json)
+
+NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong; the rest will be optimistic and try to figure things out without panicking.
+
+Use `Get` to access the value you're interested in. You can use dot and array
+notation too:
+
+    m.Get("places[0].latlng")
+
+Once you have retrieved the `Value` you're interested in, you can use the `Is*` methods to determine its type.
+
+    if m.Get("code").IsStr() { // Your code... }
+
+Or you can just assume the type, and use one of the strong type methods to extract the real value:
+
+    m.Get("code").Int()
+
+If there's no value there (or if it's the wrong type) then a default value will be returned, or you can be explicit about the default value.
+
+    m.Get("code").Int(-1)
+
+If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, manipulating and selecting that data. You can find out more by exploring the index below.
+
+### Reading data
+A simple example of how to use Objx:
+
+    // Use MustFromJSON to make an objx.Map from some JSON
+    m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`)
+
+    // Get the details
+    name := m.Get("name").Str()
+    age := m.Get("age").Int()
+
+    // Get their nickname (or use their name if they don't have one)
+    nickname := m.Get("nickname").Str(name)
+
+### Ranging
+Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For example, to `range` the data, do what you would expect:
+
+    m := objx.MustFromJSON(json)
+    for key, value := range m {
+        // Your code...
+    }
+
+## Installation
+To install Objx, use go get:
+
+    go get github.com/stretchr/objx
+
+### Staying up to date
+To update Objx to the latest version, run:
+
+    go get -u github.com/stretchr/objx
+
+### Supported go versions
+We currently support the three most recent major Go versions.
+
+## Contributing
+Please feel free to submit issues, fork the repository and send pull requests!
diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml
new file mode 100644
index 000000000..8a79e8d67
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/Taskfile.yml
@@ -0,0 +1,27 @@
+version: '3'
+
+tasks:
+  default:
+    deps: [test]
+
+  lint:
+    desc: Checks code style
+    cmds:
+      - gofmt -d -s *.go
+      - go vet ./...
+    silent: true
+
+  lint-fix:
+    desc: Fixes code style
+    cmds:
+      - gofmt -w -s *.go
+
+  test:
+    desc: Runs go tests
+    cmds:
+      - go test -race ./...
+
+  test-coverage:
+    desc: Runs go tests and calculates test coverage
+    cmds:
+      - go test -race -coverprofile=c.out ./...
diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go
new file mode 100644
index 000000000..72f1d1c1c
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/accessors.go
@@ -0,0 +1,197 @@
+package objx
+
+import (
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+const (
+	// PathSeparator is the character used to separate the elements
+	// of the keypath.
+	//
+	// For example, `location.address.city`
+	PathSeparator string = "."
+
+	// arrayAccessRegexString is the regex used to extract the array number
+	// from the access path
+	arrayAccessRegexString = `^(.+)\[([0-9]+)\]$`
+
+	// mapAccessRegexString is the regex used to extract the map key
+	// from the access path
+	mapAccessRegexString = `^([^\[]*)\[([^\]]+)\](.*)$`
+)
+
+// arrayAccessRegex is the compiled arrayAccessRegexString
+var arrayAccessRegex = regexp.MustCompile(arrayAccessRegexString)
+
+// mapAccessRegex is the compiled mapAccessRegexString
+var mapAccessRegex = regexp.MustCompile(mapAccessRegexString)
+
+// Get gets the value using the specified selector and
+// returns it inside a new Obj object.
+//
+// If it cannot find the value, Get will return a nil
+// value inside an instance of Obj.
+//
+// Get can only operate directly on map[string]interface{} and []interface{}.
+//
+// # Example
+//
+// To access the title of the third chapter of the second book, do:
+//
+//	o.Get("books[1].chapters[2].title")
+func (m Map) Get(selector string) *Value {
+	rawObj := access(m, selector, nil, false)
+	return &Value{data: rawObj}
+}
+
+// Set sets the value using the specified selector and
+// returns the object on which Set was called.
+//
+// Set can only operate directly on map[string]interface{} and []interface{}.
+//
+// # Example
+//
+// To set the title of the third chapter of the second book, do:
+//
+//	o.Set("books[1].chapters[2].title","Time to Go")
+func (m Map) Set(selector string, value interface{}) Map {
+	access(m, selector, value, true)
+	return m
+}
+
+// getIndex returns the index held in s between two brackets.
+// It also returns s without the index part, e.g. name[1] will return (1, name).
+// If no index is found, -1 is returned +func getIndex(s string) (int, string) { + arrayMatches := arrayAccessRegex.FindStringSubmatch(s) + if len(arrayMatches) > 0 { + // Get the key into the map + selector := arrayMatches[1] + // Get the index into the array at the key + // We know this can't fail because arrayMatches[2] is an int for sure + index, _ := strconv.Atoi(arrayMatches[2]) + return index, selector + } + return -1, s +} + +// getKey returns the key which is held in s by two brackets. +// It also returns the next selector. +func getKey(s string) (string, string) { + selSegs := strings.SplitN(s, PathSeparator, 2) + thisSel := selSegs[0] + nextSel := "" + + if len(selSegs) > 1 { + nextSel = selSegs[1] + } + + mapMatches := mapAccessRegex.FindStringSubmatch(s) + if len(mapMatches) > 0 { + if _, err := strconv.Atoi(mapMatches[2]); err != nil { + thisSel = mapMatches[1] + nextSel = "[" + mapMatches[2] + "]" + mapMatches[3] + + if thisSel == "" { + thisSel = mapMatches[2] + nextSel = mapMatches[3] + } + + if nextSel == "" { + selSegs = []string{"", ""} + } else if nextSel[0] == '.' { + nextSel = nextSel[1:] + } + } + } + + return thisSel, nextSel +} + +// access accesses the object using the selector and performs the +// appropriate action. +func access(current interface{}, selector string, value interface{}, isSet bool) interface{} { + thisSel, nextSel := getKey(selector) + + indexes := []int{} + for strings.Contains(thisSel, "[") { + prevSel := thisSel + index := -1 + index, thisSel = getIndex(thisSel) + indexes = append(indexes, index) + if prevSel == thisSel { + break + } + } + + if curMap, ok := current.(Map); ok { + current = map[string]interface{}(curMap) + } + // get the object in question + switch current.(type) { + case map[string]interface{}: + curMSI := current.(map[string]interface{}) + if nextSel == "" && isSet { + curMSI[thisSel] = value + return nil + } + + _, ok := curMSI[thisSel].(map[string]interface{}) + if !ok { + _, ok = curMSI[thisSel].(Map) + } + + if (curMSI[thisSel] == nil || !ok) && len(indexes) == 0 && isSet { + curMSI[thisSel] = map[string]interface{}{} + } + + current = curMSI[thisSel] + default: + current = nil + } + + // do we need to access the item of an array? + if len(indexes) > 0 { + num := len(indexes) + for num > 0 { + num-- + index := indexes[num] + indexes = indexes[:num] + if array, ok := interSlice(current); ok { + if index < len(array) { + current = array[index] + } else { + current = nil + break + } + } + } + } + + if nextSel != "" { + current = access(current, nextSel, value, isSet) + } + return current +} + +func interSlice(slice interface{}) ([]interface{}, bool) { + if array, ok := slice.([]interface{}); ok { + return array, ok + } + + s := reflect.ValueOf(slice) + if s.Kind() != reflect.Slice { + return nil, false + } + + ret := make([]interface{}, s.Len()) + + for i := 0; i < s.Len(); i++ { + ret[i] = s.Index(i).Interface() + } + + return ret, true +} diff --git a/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/objx/conversions.go new file mode 100644 index 000000000..01c63d7d3 --- /dev/null +++ b/vendor/github.com/stretchr/objx/conversions.go @@ -0,0 +1,280 @@ +package objx + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" + "strconv" +) + +// SignatureSeparator is the character that is used to +// separate the Base64 string from the security signature. 
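+// For example (a hypothetical serialized value, editorial addition), a signed
+// Base64 map might look like "eyJuYW1lIjoiTWF0In0=_d2mFn...".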
+const SignatureSeparator = "_" + +// URLValuesSliceKeySuffix is the character that is used to +// specify a suffix for slices parsed by URLValues. +// If the suffix is set to "[i]", then the index of the slice +// is used in place of i +// Ex: Suffix "[]" would have the form a[]=b&a[]=c +// OR Suffix "[i]" would have the form a[0]=b&a[1]=c +// OR Suffix "" would have the form a=b&a=c +var urlValuesSliceKeySuffix = "[]" + +const ( + URLValuesSliceKeySuffixEmpty = "" + URLValuesSliceKeySuffixArray = "[]" + URLValuesSliceKeySuffixIndex = "[i]" +) + +// SetURLValuesSliceKeySuffix sets the character that is used to +// specify a suffix for slices parsed by URLValues. +// If the suffix is set to "[i]", then the index of the slice +// is used in place of i +// Ex: Suffix "[]" would have the form a[]=b&a[]=c +// OR Suffix "[i]" would have the form a[0]=b&a[1]=c +// OR Suffix "" would have the form a=b&a=c +func SetURLValuesSliceKeySuffix(s string) error { + if s == URLValuesSliceKeySuffixEmpty || s == URLValuesSliceKeySuffixArray || s == URLValuesSliceKeySuffixIndex { + urlValuesSliceKeySuffix = s + return nil + } + + return errors.New("objx: Invalid URLValuesSliceKeySuffix provided.") +} + +// JSON converts the contained object to a JSON string +// representation +func (m Map) JSON() (string, error) { + for k, v := range m { + m[k] = cleanUp(v) + } + + result, err := json.Marshal(m) + if err != nil { + err = errors.New("objx: JSON encode failed with: " + err.Error()) + } + return string(result), err +} + +func cleanUpInterfaceArray(in []interface{}) []interface{} { + result := make([]interface{}, len(in)) + for i, v := range in { + result[i] = cleanUp(v) + } + return result +} + +func cleanUpInterfaceMap(in map[interface{}]interface{}) Map { + result := Map{} + for k, v := range in { + result[fmt.Sprintf("%v", k)] = cleanUp(v) + } + return result +} + +func cleanUpStringMap(in map[string]interface{}) Map { + result := Map{} + for k, v := range in { + result[k] = cleanUp(v) + } + return result +} + +func cleanUpMSIArray(in []map[string]interface{}) []Map { + result := make([]Map, len(in)) + for i, v := range in { + result[i] = cleanUpStringMap(v) + } + return result +} + +func cleanUpMapArray(in []Map) []Map { + result := make([]Map, len(in)) + for i, v := range in { + result[i] = cleanUpStringMap(v) + } + return result +} + +func cleanUp(v interface{}) interface{} { + switch v := v.(type) { + case []interface{}: + return cleanUpInterfaceArray(v) + case []map[string]interface{}: + return cleanUpMSIArray(v) + case map[interface{}]interface{}: + return cleanUpInterfaceMap(v) + case Map: + return cleanUpStringMap(v) + case []Map: + return cleanUpMapArray(v) + default: + return v + } +} + +// MustJSON converts the contained object to a JSON string +// representation and panics if there is an error +func (m Map) MustJSON() string { + result, err := m.JSON() + if err != nil { + panic(err.Error()) + } + return result +} + +// Base64 converts the contained object to a Base64 string +// representation of the JSON string representation +func (m Map) Base64() (string, error) { + var buf bytes.Buffer + + jsonData, err := m.JSON() + if err != nil { + return "", err + } + + encoder := base64.NewEncoder(base64.StdEncoding, &buf) + _, _ = encoder.Write([]byte(jsonData)) + _ = encoder.Close() + + return buf.String(), nil +} + +// MustBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and panics +// if there is an error +func (m Map) MustBase64() string { + 
result, err := m.Base64() + if err != nil { + panic(err.Error()) + } + return result +} + +// SignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key. +func (m Map) SignedBase64(key string) (string, error) { + base64, err := m.Base64() + if err != nil { + return "", err + } + + sig := HashWithKey(base64, key) + return base64 + SignatureSeparator + sig, nil +} + +// MustSignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key and panics if there is an error +func (m Map) MustSignedBase64(key string) string { + result, err := m.SignedBase64(key) + if err != nil { + panic(err.Error()) + } + return result +} + +/* + URL Query + ------------------------------------------------ +*/ + +// URLValues creates a url.Values object from an Obj. This +// function requires that the wrapped object be a map[string]interface{} +func (m Map) URLValues() url.Values { + vals := make(url.Values) + + m.parseURLValues(m, vals, "") + + return vals +} + +func (m Map) parseURLValues(queryMap Map, vals url.Values, key string) { + useSliceIndex := false + if urlValuesSliceKeySuffix == "[i]" { + useSliceIndex = true + } + + for k, v := range queryMap { + val := &Value{data: v} + switch { + case val.IsObjxMap(): + if key == "" { + m.parseURLValues(val.ObjxMap(), vals, k) + } else { + m.parseURLValues(val.ObjxMap(), vals, key+"["+k+"]") + } + case val.IsObjxMapSlice(): + sliceKey := k + if key != "" { + sliceKey = key + "[" + k + "]" + } + + if useSliceIndex { + for i, sv := range val.MustObjxMapSlice() { + sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]" + m.parseURLValues(sv, vals, sk) + } + } else { + sliceKey = sliceKey + urlValuesSliceKeySuffix + for _, sv := range val.MustObjxMapSlice() { + m.parseURLValues(sv, vals, sliceKey) + } + } + case val.IsMSISlice(): + sliceKey := k + if key != "" { + sliceKey = key + "[" + k + "]" + } + + if useSliceIndex { + for i, sv := range val.MustMSISlice() { + sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]" + m.parseURLValues(New(sv), vals, sk) + } + } else { + sliceKey = sliceKey + urlValuesSliceKeySuffix + for _, sv := range val.MustMSISlice() { + m.parseURLValues(New(sv), vals, sliceKey) + } + } + case val.IsStrSlice(), val.IsBoolSlice(), + val.IsFloat32Slice(), val.IsFloat64Slice(), + val.IsIntSlice(), val.IsInt8Slice(), val.IsInt16Slice(), val.IsInt32Slice(), val.IsInt64Slice(), + val.IsUintSlice(), val.IsUint8Slice(), val.IsUint16Slice(), val.IsUint32Slice(), val.IsUint64Slice(): + + sliceKey := k + if key != "" { + sliceKey = key + "[" + k + "]" + } + + if useSliceIndex { + for i, sv := range val.StringSlice() { + sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]" + vals.Set(sk, sv) + } + } else { + sliceKey = sliceKey + urlValuesSliceKeySuffix + vals[sliceKey] = val.StringSlice() + } + + default: + if key == "" { + vals.Set(k, val.String()) + } else { + vals.Set(key+"["+k+"]", val.String()) + } + } + } +} + +// URLQuery gets an encoded URL query representing the given +// Obj. 
This function requires that the wrapped object be a
+// map[string]interface{}
+func (m Map) URLQuery() (string, error) {
+	return m.URLValues().Encode(), nil
+}
diff --git a/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/stretchr/objx/doc.go
new file mode 100644
index 000000000..b170af74b
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/doc.go
@@ -0,0 +1,66 @@
+/*
+Package objx provides utilities for dealing with maps, slices, JSON and other data.
+
+# Overview
+
+Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes
+a powerful `Get` method (among others) that allows you to easily and quickly get
+access to data within the map, without having to worry too much about type assertions,
+missing data, default values etc.
+
+# Pattern
+
+Objx uses a predictable pattern to make accessing data within `map[string]interface{}` easy.
+Call one of the `objx.` functions to create your `objx.Map` to get going:
+
+	m, err := objx.FromJSON(json)
+
+NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong;
+the rest will be optimistic and try to figure things out without panicking.
+
+Use `Get` to access the value you're interested in. You can use dot and array
+notation too:
+
+	m.Get("places[0].latlng")
+
+Once you have found the `Value` you're interested in, you can use the `Is*` methods to determine its type.
+
+	if m.Get("code").IsStr() { // Your code... }
+
+Or you can just assume the type, and use one of the strong type methods to extract the real value:
+
+	m.Get("code").Int()
+
+If there's no value there (or if it's the wrong type) then a default value will be returned,
+or you can be explicit about the default value.
+
+	m.Get("code").Int(-1)
+
+If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating,
+manipulating and selecting that data. You can find out more by exploring the index below.
+
+# Reading data
+
+A simple example of how to use Objx:
+
+	// Use MustFromJSON to make an objx.Map from some JSON
+	m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`)
+
+	// Get the details
+	name := m.Get("name").Str()
+	age := m.Get("age").Int()
+
+	// Get their nickname (or use their name if they don't have one)
+	nickname := m.Get("nickname").Str(name)
+
+# Ranging
+
+Since `objx.Map` is a `map[string]interface{}` you can treat it as such.
+For example, to `range` the data, do what you would expect:
+
+	m := objx.MustFromJSON(json)
+	for key, value := range m {
+		// Your code...
+	}
+*/
+package objx
diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go
new file mode 100644
index 000000000..ab9f9ae67
--- /dev/null
+++ b/vendor/github.com/stretchr/objx/map.go
@@ -0,0 +1,214 @@
+package objx
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"io/ioutil"
+	"net/url"
+	"strings"
+)
+
+// MSIConvertable is an interface that defines methods for converting your
+// custom types to a map[string]interface{} representation.
+type MSIConvertable interface {
+	// MSI gets a map[string]interface{} (msi) representing the
+	// object.
+	MSI() map[string]interface{}
+}
+
+// Map provides extended functionality for working with
+// untyped data, in particular map[string]interface{} (msi).
+type Map map[string]interface{}
+
+// Value returns the internal value instance
+func (m Map) Value() *Value {
+	return &Value{data: m}
+}
+
+// Nil represents a nil Map.
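+//
+// The conversion constructors hand Nil back alongside an error; a minimal
+// sketch:
+//
+//	m, err := objx.FromJSON("{not valid json")
+//	// err != nil and m is Nil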
+var Nil = New(nil)
+
+// New creates a new Map containing the map[string]interface{} in the data argument.
+// If the data argument is not a map[string]interface{}, New attempts to call the
+// MSI() method on the MSIConvertable interface to create one.
+func New(data interface{}) Map {
+	if _, ok := data.(map[string]interface{}); !ok {
+		if converter, ok := data.(MSIConvertable); ok {
+			data = converter.MSI()
+		} else {
+			return nil
+		}
+	}
+	return Map(data.(map[string]interface{}))
+}
+
+// MSI creates a map[string]interface{} and puts it inside a new Map.
+//
+// The arguments follow a key, value pattern.
+//
+// Returns nil if any key argument is non-string or if there are an odd number of arguments.
+//
+// # Example
+//
+// To easily create Maps:
+//
+//	m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true))
+//
+//	// creates a Map equivalent to
+//	m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}}
func MSI(keyAndValuePairs ...interface{}) Map {
+	newMap := Map{}
+	keyAndValuePairsLen := len(keyAndValuePairs)
+	if keyAndValuePairsLen%2 != 0 {
+		return nil
+	}
+	for i := 0; i < keyAndValuePairsLen; i = i + 2 {
+		key := keyAndValuePairs[i]
+		value := keyAndValuePairs[i+1]
+
+		// make sure the key is a string
+		keyString, keyStringOK := key.(string)
+		if !keyStringOK {
+			return nil
+		}
+		newMap[keyString] = value
+	}
+	return newMap
+}
+
+// ****** Conversion Constructors
+
+// MustFromJSON creates a new Map containing the data specified in the
+// jsonString.
+//
+// Panics if the JSON is invalid.
+func MustFromJSON(jsonString string) Map {
+	o, err := FromJSON(jsonString)
+	if err != nil {
+		panic("objx: MustFromJSON failed with error: " + err.Error())
+	}
+	return o
+}
+
+// MustFromJSONSlice creates a new slice of Map containing the data specified in the
+// jsonString. Works with JSON that has a top-level array.
+//
+// Panics if the JSON is invalid.
+func MustFromJSONSlice(jsonString string) []Map {
+	slice, err := FromJSONSlice(jsonString)
+	if err != nil {
+		panic("objx: MustFromJSONSlice failed with error: " + err.Error())
+	}
+	return slice
+}
+
+// FromJSON creates a new Map containing the data specified in the
+// jsonString.
+//
+// Returns an error if the JSON is invalid.
+func FromJSON(jsonString string) (Map, error) {
+	var m Map
+	err := json.Unmarshal([]byte(jsonString), &m)
+	if err != nil {
+		return Nil, err
+	}
+	return m, nil
+}
+
+// FromJSONSlice creates a new slice of Map containing the data specified in the
+// jsonString. Works with JSON that has a top-level array.
+//
+// Returns an error if the JSON is invalid.
+func FromJSONSlice(jsonString string) ([]Map, error) {
+	var slice []Map
+	err := json.Unmarshal([]byte(jsonString), &slice)
+	if err != nil {
+		return nil, err
+	}
+	return slice, nil
+}
+
+// FromBase64 creates a new Obj containing the data specified
+// in the Base64 string.
+//
+// The string is an encoded JSON string returned by Base64
+func FromBase64(base64String string) (Map, error) {
+	decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String))
+	decoded, err := ioutil.ReadAll(decoder)
+	if err != nil {
+		return nil, err
+	}
+	return FromJSON(string(decoded))
+}
+
+// MustFromBase64 creates a new Obj containing the data specified
+// in the Base64 string and panics if there is an error.
+// +// The string is an encoded JSON string returned by Base64 +func MustFromBase64(base64String string) Map { + result, err := FromBase64(base64String) + if err != nil { + panic("objx: MustFromBase64 failed with error: " + err.Error()) + } + return result +} + +// FromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string. +// +// The string is an encoded JSON string returned by SignedBase64 +func FromSignedBase64(base64String, key string) (Map, error) { + parts := strings.Split(base64String, SignatureSeparator) + if len(parts) != 2 { + return nil, errors.New("objx: Signed base64 string is malformed") + } + + sig := HashWithKey(parts[0], key) + if parts[1] != sig { + return nil, errors.New("objx: Signature for base64 data does not match") + } + return FromBase64(parts[0]) +} + +// MustFromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromSignedBase64(base64String, key string) Map { + result, err := FromSignedBase64(base64String, key) + if err != nil { + panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) + } + return result +} + +// FromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +func FromURLQuery(query string) (Map, error) { + vals, err := url.ParseQuery(query) + if err != nil { + return nil, err + } + m := Map{} + for k, vals := range vals { + m[k] = vals[0] + } + return m, nil +} + +// MustFromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +// +// Panics if it encounters an error +func MustFromURLQuery(query string) Map { + o, err := FromURLQuery(query) + if err != nil { + panic("objx: MustFromURLQuery failed with error: " + err.Error()) + } + return o +} diff --git a/vendor/github.com/stretchr/objx/mutations.go b/vendor/github.com/stretchr/objx/mutations.go new file mode 100644 index 000000000..c3400a3f7 --- /dev/null +++ b/vendor/github.com/stretchr/objx/mutations.go @@ -0,0 +1,77 @@ +package objx + +// Exclude returns a new Map with the keys in the specified []string +// excluded. +func (m Map) Exclude(exclude []string) Map { + excluded := make(Map) + for k, v := range m { + if !contains(exclude, k) { + excluded[k] = v + } + } + return excluded +} + +// Copy creates a shallow copy of the Obj. +func (m Map) Copy() Map { + copied := Map{} + for k, v := range m { + copied[k] = v + } + return copied +} + +// Merge blends the specified map with a copy of this map and returns the result. +// +// Keys that appear in both will be selected from the specified map. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) Merge(merge Map) Map { + return m.Copy().MergeHere(merge) +} + +// MergeHere blends the specified map with this map and returns the current map. +// +// Keys that appear in both will be selected from the specified map. The original map +// will be modified. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) MergeHere(merge Map) Map { + for k, v := range merge { + m[k] = v + } + return m +} + +// Transform builds a new Obj giving the transformer a chance +// to change the keys and values as it goes. 
This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { + newMap := Map{} + for k, v := range m { + modifiedKey, modifiedVal := transformer(k, v) + newMap[modifiedKey] = modifiedVal + } + return newMap +} + +// TransformKeys builds a new map using the specified key mapping. +// +// Unspecified keys will be unaltered. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) TransformKeys(mapping map[string]string) Map { + return m.Transform(func(key string, value interface{}) (string, interface{}) { + if newKey, ok := mapping[key]; ok { + return newKey, value + } + return key, value + }) +} + +// Checks if a string slice contains a string +func contains(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} diff --git a/vendor/github.com/stretchr/objx/security.go b/vendor/github.com/stretchr/objx/security.go new file mode 100644 index 000000000..692be8e2a --- /dev/null +++ b/vendor/github.com/stretchr/objx/security.go @@ -0,0 +1,12 @@ +package objx + +import ( + "crypto/sha1" + "encoding/hex" +) + +// HashWithKey hashes the specified string using the security key +func HashWithKey(data, key string) string { + d := sha1.Sum([]byte(data + ":" + key)) + return hex.EncodeToString(d[:]) +} diff --git a/vendor/github.com/stretchr/objx/tests.go b/vendor/github.com/stretchr/objx/tests.go new file mode 100644 index 000000000..d9e0b479a --- /dev/null +++ b/vendor/github.com/stretchr/objx/tests.go @@ -0,0 +1,17 @@ +package objx + +// Has gets whether there is something at the specified selector +// or not. +// +// If m is nil, Has will always return false. +func (m Map) Has(selector string) bool { + if m == nil { + return false + } + return !m.Get(selector).IsNil() +} + +// IsNil gets whether the data is nil or not. +func (v *Value) IsNil() bool { + return v == nil || v.data == nil +} diff --git a/vendor/github.com/stretchr/objx/type_specific.go b/vendor/github.com/stretchr/objx/type_specific.go new file mode 100644 index 000000000..80f88d9fa --- /dev/null +++ b/vendor/github.com/stretchr/objx/type_specific.go @@ -0,0 +1,346 @@ +package objx + +/* + MSI (map[string]interface{} and []map[string]interface{}) +*/ + +// MSI gets the value as a map[string]interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if s, ok := v.data.(Map); ok { + return map[string]interface{}(s) + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSI gets the value as a map[string]interface{}. +// +// Panics if the object is not a map[string]interface{}. +func (v *Value) MustMSI() map[string]interface{} { + if s, ok := v.data.(Map); ok { + return map[string]interface{}(s) + } + return v.data.(map[string]interface{}) +} + +// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault +// value or nil if the value is not a []map[string]interface{}. 
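+//
+// A short illustrative sketch (the data below is an assumption, not part of
+// the upstream docs):
+//
+//	m := objx.MustFromJSON(`{"people": [{"name": "Mat"}]}`)
+//	people := m.Get("people").MSISlice()
+//	// people[0]["name"] == "Mat"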
+func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { + if s, ok := v.data.([]map[string]interface{}); ok { + return s + } + + s := v.ObjxMapSlice() + if s == nil { + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil + } + + result := make([]map[string]interface{}, len(s)) + for i := range s { + result[i] = s[i].Value().MSI() + } + return result +} + +// MustMSISlice gets the value as a []map[string]interface{}. +// +// Panics if the object is not a []map[string]interface{}. +func (v *Value) MustMSISlice() []map[string]interface{} { + if s := v.MSISlice(); s != nil { + return s + } + + return v.data.([]map[string]interface{}) +} + +// IsMSI gets whether the object contained is a map[string]interface{} or not. +func (v *Value) IsMSI() bool { + _, ok := v.data.(map[string]interface{}) + if !ok { + _, ok = v.data.(Map) + } + return ok +} + +// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. +func (v *Value) IsMSISlice() bool { + _, ok := v.data.([]map[string]interface{}) + if !ok { + _, ok = v.data.([]Map) + if !ok { + s, ok := v.data.([]interface{}) + if ok { + for i := range s { + switch s[i].(type) { + case Map: + case map[string]interface{}: + default: + return false + } + } + return true + } + } + } + return ok +} + +// EachMSI calls the specified callback for each object +// in the []map[string]interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { + for index, val := range v.MustMSISlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereMSI uses the specified decider function to select items +// from the []map[string]interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { + var selected []map[string]interface{} + v.EachMSI(func(index int, val map[string]interface{}) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupMSI uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]map[string]interface{}. +func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { + groups := make(map[string][]map[string]interface{}) + v.EachMSI(func(index int, val map[string]interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]map[string]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceMSI uses the specified function to replace each map[string]interface{}s +// by iterating each item. The data in the returned result will be a +// []map[string]interface{} containing the replaced items. +func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { + arr := v.MustMSISlice() + replaced := make([]map[string]interface{}, len(arr)) + v.EachMSI(func(index int, val map[string]interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectMSI uses the specified collector function to collect a value +// for each of the map[string]interface{}s in the slice. 
The data returned will be a +// []interface{}. +func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { + arr := v.MustMSISlice() + collected := make([]interface{}, len(arr)) + v.EachMSI(func(index int, val map[string]interface{}) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + ObjxMap ((Map) and [](Map)) +*/ + +// ObjxMap gets the value as a (Map), returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { + if s, ok := v.data.((Map)); ok { + return s + } + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return New(nil) +} + +// MustObjxMap gets the value as a (Map). +// +// Panics if the object is not a (Map). +func (v *Value) MustObjxMap() Map { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + return v.data.((Map)) +} + +// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault +// value or nil if the value is not a [](Map). +func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { + if s, ok := v.data.([]Map); ok { + return s + } + + if s, ok := v.data.([]map[string]interface{}); ok { + result := make([]Map, len(s)) + for i := range s { + result[i] = s[i] + } + return result + } + + s, ok := v.data.([]interface{}) + if !ok { + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil + } + + result := make([]Map, len(s)) + for i := range s { + switch s[i].(type) { + case Map: + result[i] = s[i].(Map) + case map[string]interface{}: + result[i] = New(s[i]) + default: + return nil + } + } + return result +} + +// MustObjxMapSlice gets the value as a [](Map). +// +// Panics if the object is not a [](Map). +func (v *Value) MustObjxMapSlice() [](Map) { + if s := v.ObjxMapSlice(); s != nil { + return s + } + return v.data.([](Map)) +} + +// IsObjxMap gets whether the object contained is a (Map) or not. +func (v *Value) IsObjxMap() bool { + _, ok := v.data.((Map)) + if !ok { + _, ok = v.data.(map[string]interface{}) + } + return ok +} + +// IsObjxMapSlice gets whether the object contained is a [](Map) or not. +func (v *Value) IsObjxMapSlice() bool { + _, ok := v.data.([](Map)) + if !ok { + _, ok = v.data.([]map[string]interface{}) + if !ok { + s, ok := v.data.([]interface{}) + if ok { + for i := range s { + switch s[i].(type) { + case Map: + case map[string]interface{}: + default: + return false + } + } + return true + } + } + } + + return ok +} + +// EachObjxMap calls the specified callback for each object +// in the [](Map). +// +// Panics if the object is the wrong type. +func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { + for index, val := range v.MustObjxMapSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereObjxMap uses the specified decider function to select items +// from the [](Map). The object contained in the result will contain +// only the selected items. +func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { + var selected [](Map) + v.EachObjxMap(func(index int, val Map) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupObjxMap uses the specified grouper function to group the items +// keyed by the return of the grouper. 
The object contained in the +// result will contain a map[string][](Map). +func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { + groups := make(map[string][](Map)) + v.EachObjxMap(func(index int, val Map) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([](Map), 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceObjxMap uses the specified function to replace each (Map)s +// by iterating each item. The data in the returned result will be a +// [](Map) containing the replaced items. +func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { + arr := v.MustObjxMapSlice() + replaced := make([](Map), len(arr)) + v.EachObjxMap(func(index int, val Map) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectObjxMap uses the specified collector function to collect a value +// for each of the (Map)s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { + arr := v.MustObjxMapSlice() + collected := make([]interface{}, len(arr)) + v.EachObjxMap(func(index int, val Map) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go new file mode 100644 index 000000000..45850456e --- /dev/null +++ b/vendor/github.com/stretchr/objx/type_specific_codegen.go @@ -0,0 +1,2261 @@ +package objx + +/* + Inter (interface{} and []interface{}) +*/ + +// Inter gets the value as a interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Inter(optionalDefault ...interface{}) interface{} { + if s, ok := v.data.(interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInter gets the value as a interface{}. +// +// Panics if the object is not a interface{}. +func (v *Value) MustInter() interface{} { + return v.data.(interface{}) +} + +// InterSlice gets the value as a []interface{}, returns the optionalDefault +// value or nil if the value is not a []interface{}. +func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { + if s, ok := v.data.([]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInterSlice gets the value as a []interface{}. +// +// Panics if the object is not a []interface{}. +func (v *Value) MustInterSlice() []interface{} { + return v.data.([]interface{}) +} + +// IsInter gets whether the object contained is a interface{} or not. +func (v *Value) IsInter() bool { + _, ok := v.data.(interface{}) + return ok +} + +// IsInterSlice gets whether the object contained is a []interface{} or not. +func (v *Value) IsInterSlice() bool { + _, ok := v.data.([]interface{}) + return ok +} + +// EachInter calls the specified callback for each object +// in the []interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { + for index, val := range v.MustInterSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInter uses the specified decider function to select items +// from the []interface{}. 
The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { + var selected []interface{} + v.EachInter(func(index int, val interface{}) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInter uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]interface{}. +func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { + groups := make(map[string][]interface{}) + v.EachInter(func(index int, val interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInter uses the specified function to replace each interface{}s +// by iterating each item. The data in the returned result will be a +// []interface{} containing the replaced items. +func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { + arr := v.MustInterSlice() + replaced := make([]interface{}, len(arr)) + v.EachInter(func(index int, val interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInter uses the specified collector function to collect a value +// for each of the interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { + arr := v.MustInterSlice() + collected := make([]interface{}, len(arr)) + v.EachInter(func(index int, val interface{}) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Bool (bool and []bool) +*/ + +// Bool gets the value as a bool, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Bool(optionalDefault ...bool) bool { + if s, ok := v.data.(bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return false +} + +// MustBool gets the value as a bool. +// +// Panics if the object is not a bool. +func (v *Value) MustBool() bool { + return v.data.(bool) +} + +// BoolSlice gets the value as a []bool, returns the optionalDefault +// value or nil if the value is not a []bool. +func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool { + if s, ok := v.data.([]bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustBoolSlice gets the value as a []bool. +// +// Panics if the object is not a []bool. +func (v *Value) MustBoolSlice() []bool { + return v.data.([]bool) +} + +// IsBool gets whether the object contained is a bool or not. +func (v *Value) IsBool() bool { + _, ok := v.data.(bool) + return ok +} + +// IsBoolSlice gets whether the object contained is a []bool or not. +func (v *Value) IsBoolSlice() bool { + _, ok := v.data.([]bool) + return ok +} + +// EachBool calls the specified callback for each object +// in the []bool. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachBool(callback func(int, bool) bool) *Value { + for index, val := range v.MustBoolSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereBool uses the specified decider function to select items +// from the []bool. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereBool(decider func(int, bool) bool) *Value { + var selected []bool + v.EachBool(func(index int, val bool) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupBool uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]bool. +func (v *Value) GroupBool(grouper func(int, bool) string) *Value { + groups := make(map[string][]bool) + v.EachBool(func(index int, val bool) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]bool, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceBool uses the specified function to replace each bools +// by iterating each item. The data in the returned result will be a +// []bool containing the replaced items. +func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { + arr := v.MustBoolSlice() + replaced := make([]bool, len(arr)) + v.EachBool(func(index int, val bool) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectBool uses the specified collector function to collect a value +// for each of the bools in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { + arr := v.MustBoolSlice() + collected := make([]interface{}, len(arr)) + v.EachBool(func(index int, val bool) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Str (string and []string) +*/ + +// Str gets the value as a string, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Str(optionalDefault ...string) string { + if s, ok := v.data.(string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return "" +} + +// MustStr gets the value as a string. +// +// Panics if the object is not a string. +func (v *Value) MustStr() string { + return v.data.(string) +} + +// StrSlice gets the value as a []string, returns the optionalDefault +// value or nil if the value is not a []string. +func (v *Value) StrSlice(optionalDefault ...[]string) []string { + if s, ok := v.data.([]string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustStrSlice gets the value as a []string. +// +// Panics if the object is not a []string. +func (v *Value) MustStrSlice() []string { + return v.data.([]string) +} + +// IsStr gets whether the object contained is a string or not. +func (v *Value) IsStr() bool { + _, ok := v.data.(string) + return ok +} + +// IsStrSlice gets whether the object contained is a []string or not. +func (v *Value) IsStrSlice() bool { + _, ok := v.data.([]string) + return ok +} + +// EachStr calls the specified callback for each object +// in the []string. +// +// Panics if the object is the wrong type. 
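+//
+// For example (an illustrative sketch):
+//
+//	objx.Map{"names": []string{"Tyler", "Mat"}}.Get("names").
+//		EachStr(func(i int, s string) bool {
+//			fmt.Println(i, s) // prints 0 Tyler, then 1 Mat
+//			return true       // true keeps the iteration going
+//		})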
+func (v *Value) EachStr(callback func(int, string) bool) *Value { + for index, val := range v.MustStrSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereStr uses the specified decider function to select items +// from the []string. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereStr(decider func(int, string) bool) *Value { + var selected []string + v.EachStr(func(index int, val string) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupStr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]string. +func (v *Value) GroupStr(grouper func(int, string) string) *Value { + groups := make(map[string][]string) + v.EachStr(func(index int, val string) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]string, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceStr uses the specified function to replace each strings +// by iterating each item. The data in the returned result will be a +// []string containing the replaced items. +func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { + arr := v.MustStrSlice() + replaced := make([]string, len(arr)) + v.EachStr(func(index int, val string) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectStr uses the specified collector function to collect a value +// for each of the strings in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { + arr := v.MustStrSlice() + collected := make([]interface{}, len(arr)) + v.EachStr(func(index int, val string) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int (int and []int) +*/ + +// Int gets the value as a int, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int(optionalDefault ...int) int { + if s, ok := v.data.(int); ok { + return s + } + if s, ok := v.data.(float64); ok { + if float64(int(s)) == s { + return int(s) + } + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt gets the value as a int. +// +// Panics if the object is not a int. +func (v *Value) MustInt() int { + if s, ok := v.data.(float64); ok { + if float64(int(s)) == s { + return int(s) + } + } + return v.data.(int) +} + +// IntSlice gets the value as a []int, returns the optionalDefault +// value or nil if the value is not a []int. +func (v *Value) IntSlice(optionalDefault ...[]int) []int { + if s, ok := v.data.([]int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustIntSlice gets the value as a []int. +// +// Panics if the object is not a []int. +func (v *Value) MustIntSlice() []int { + return v.data.([]int) +} + +// IsInt gets whether the object contained is a int or not. +func (v *Value) IsInt() bool { + _, ok := v.data.(int) + return ok +} + +// IsIntSlice gets whether the object contained is a []int or not. 
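+//
+// The type assertion is strict; a sketch:
+//
+//	objx.Map{"a": []int{1, 2}}.Get("a").IsIntSlice()  // true
+//	objx.Map{"a": []int64{1}}.Get("a").IsIntSlice()   // false: []int64 is not []int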
+func (v *Value) IsIntSlice() bool { + _, ok := v.data.([]int) + return ok +} + +// EachInt calls the specified callback for each object +// in the []int. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt(callback func(int, int) bool) *Value { + for index, val := range v.MustIntSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt uses the specified decider function to select items +// from the []int. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt(decider func(int, int) bool) *Value { + var selected []int + v.EachInt(func(index int, val int) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int. +func (v *Value) GroupInt(grouper func(int, int) string) *Value { + groups := make(map[string][]int) + v.EachInt(func(index int, val int) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt uses the specified function to replace each ints +// by iterating each item. The data in the returned result will be a +// []int containing the replaced items. +func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { + arr := v.MustIntSlice() + replaced := make([]int, len(arr)) + v.EachInt(func(index int, val int) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt uses the specified collector function to collect a value +// for each of the ints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { + arr := v.MustIntSlice() + collected := make([]interface{}, len(arr)) + v.EachInt(func(index int, val int) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int8 (int8 and []int8) +*/ + +// Int8 gets the value as a int8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int8(optionalDefault ...int8) int8 { + if s, ok := v.data.(int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt8 gets the value as a int8. +// +// Panics if the object is not a int8. +func (v *Value) MustInt8() int8 { + return v.data.(int8) +} + +// Int8Slice gets the value as a []int8, returns the optionalDefault +// value or nil if the value is not a []int8. +func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 { + if s, ok := v.data.([]int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt8Slice gets the value as a []int8. +// +// Panics if the object is not a []int8. +func (v *Value) MustInt8Slice() []int8 { + return v.data.([]int8) +} + +// IsInt8 gets whether the object contained is a int8 or not. +func (v *Value) IsInt8() bool { + _, ok := v.data.(int8) + return ok +} + +// IsInt8Slice gets whether the object contained is a []int8 or not. 
+func (v *Value) IsInt8Slice() bool { + _, ok := v.data.([]int8) + return ok +} + +// EachInt8 calls the specified callback for each object +// in the []int8. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt8(callback func(int, int8) bool) *Value { + for index, val := range v.MustInt8Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt8 uses the specified decider function to select items +// from the []int8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { + var selected []int8 + v.EachInt8(func(index int, val int8) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int8. +func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { + groups := make(map[string][]int8) + v.EachInt8(func(index int, val int8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt8 uses the specified function to replace each int8s +// by iterating each item. The data in the returned result will be a +// []int8 containing the replaced items. +func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { + arr := v.MustInt8Slice() + replaced := make([]int8, len(arr)) + v.EachInt8(func(index int, val int8) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt8 uses the specified collector function to collect a value +// for each of the int8s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { + arr := v.MustInt8Slice() + collected := make([]interface{}, len(arr)) + v.EachInt8(func(index int, val int8) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int16 (int16 and []int16) +*/ + +// Int16 gets the value as a int16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int16(optionalDefault ...int16) int16 { + if s, ok := v.data.(int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt16 gets the value as a int16. +// +// Panics if the object is not a int16. +func (v *Value) MustInt16() int16 { + return v.data.(int16) +} + +// Int16Slice gets the value as a []int16, returns the optionalDefault +// value or nil if the value is not a []int16. +func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 { + if s, ok := v.data.([]int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt16Slice gets the value as a []int16. +// +// Panics if the object is not a []int16. +func (v *Value) MustInt16Slice() []int16 { + return v.data.([]int16) +} + +// IsInt16 gets whether the object contained is a int16 or not. +func (v *Value) IsInt16() bool { + _, ok := v.data.(int16) + return ok +} + +// IsInt16Slice gets whether the object contained is a []int16 or not. 
+func (v *Value) IsInt16Slice() bool { + _, ok := v.data.([]int16) + return ok +} + +// EachInt16 calls the specified callback for each object +// in the []int16. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt16(callback func(int, int16) bool) *Value { + for index, val := range v.MustInt16Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt16 uses the specified decider function to select items +// from the []int16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { + var selected []int16 + v.EachInt16(func(index int, val int16) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int16. +func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { + groups := make(map[string][]int16) + v.EachInt16(func(index int, val int16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt16 uses the specified function to replace each int16s +// by iterating each item. The data in the returned result will be a +// []int16 containing the replaced items. +func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { + arr := v.MustInt16Slice() + replaced := make([]int16, len(arr)) + v.EachInt16(func(index int, val int16) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt16 uses the specified collector function to collect a value +// for each of the int16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { + arr := v.MustInt16Slice() + collected := make([]interface{}, len(arr)) + v.EachInt16(func(index int, val int16) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int32 (int32 and []int32) +*/ + +// Int32 gets the value as a int32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int32(optionalDefault ...int32) int32 { + if s, ok := v.data.(int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt32 gets the value as a int32. +// +// Panics if the object is not a int32. +func (v *Value) MustInt32() int32 { + return v.data.(int32) +} + +// Int32Slice gets the value as a []int32, returns the optionalDefault +// value or nil if the value is not a []int32. +func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 { + if s, ok := v.data.([]int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt32Slice gets the value as a []int32. +// +// Panics if the object is not a []int32. +func (v *Value) MustInt32Slice() []int32 { + return v.data.([]int32) +} + +// IsInt32 gets whether the object contained is a int32 or not. +func (v *Value) IsInt32() bool { + _, ok := v.data.(int32) + return ok +} + +// IsInt32Slice gets whether the object contained is a []int32 or not. 
+func (v *Value) IsInt32Slice() bool { + _, ok := v.data.([]int32) + return ok +} + +// EachInt32 calls the specified callback for each object +// in the []int32. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt32(callback func(int, int32) bool) *Value { + for index, val := range v.MustInt32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt32 uses the specified decider function to select items +// from the []int32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { + var selected []int32 + v.EachInt32(func(index int, val int32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int32. +func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { + groups := make(map[string][]int32) + v.EachInt32(func(index int, val int32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt32 uses the specified function to replace each int32s +// by iterating each item. The data in the returned result will be a +// []int32 containing the replaced items. +func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { + arr := v.MustInt32Slice() + replaced := make([]int32, len(arr)) + v.EachInt32(func(index int, val int32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt32 uses the specified collector function to collect a value +// for each of the int32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { + arr := v.MustInt32Slice() + collected := make([]interface{}, len(arr)) + v.EachInt32(func(index int, val int32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int64 (int64 and []int64) +*/ + +// Int64 gets the value as a int64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int64(optionalDefault ...int64) int64 { + if s, ok := v.data.(int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt64 gets the value as a int64. +// +// Panics if the object is not a int64. +func (v *Value) MustInt64() int64 { + return v.data.(int64) +} + +// Int64Slice gets the value as a []int64, returns the optionalDefault +// value or nil if the value is not a []int64. +func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 { + if s, ok := v.data.([]int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt64Slice gets the value as a []int64. +// +// Panics if the object is not a []int64. +func (v *Value) MustInt64Slice() []int64 { + return v.data.([]int64) +} + +// IsInt64 gets whether the object contained is a int64 or not. +func (v *Value) IsInt64() bool { + _, ok := v.data.(int64) + return ok +} + +// IsInt64Slice gets whether the object contained is a []int64 or not. 
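+//
+// Note that encoding/json decodes numbers into float64, so values read via
+// FromJSON will not satisfy IsInt64Slice; a sketch:
+//
+//	objx.Map{"ns": []int64{1, 2}}.Get("ns").IsInt64Slice()        // true
+//	objx.MustFromJSON(`{"ns": [1, 2]}`).Get("ns").IsInt64Slice()  // false: []interface{} of float64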
+func (v *Value) IsInt64Slice() bool { + _, ok := v.data.([]int64) + return ok +} + +// EachInt64 calls the specified callback for each object +// in the []int64. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt64(callback func(int, int64) bool) *Value { + for index, val := range v.MustInt64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt64 uses the specified decider function to select items +// from the []int64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { + var selected []int64 + v.EachInt64(func(index int, val int64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int64. +func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { + groups := make(map[string][]int64) + v.EachInt64(func(index int, val int64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt64 uses the specified function to replace each int64s +// by iterating each item. The data in the returned result will be a +// []int64 containing the replaced items. +func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { + arr := v.MustInt64Slice() + replaced := make([]int64, len(arr)) + v.EachInt64(func(index int, val int64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt64 uses the specified collector function to collect a value +// for each of the int64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { + arr := v.MustInt64Slice() + collected := make([]interface{}, len(arr)) + v.EachInt64(func(index int, val int64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint (uint and []uint) +*/ + +// Uint gets the value as a uint, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint(optionalDefault ...uint) uint { + if s, ok := v.data.(uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint gets the value as a uint. +// +// Panics if the object is not a uint. +func (v *Value) MustUint() uint { + return v.data.(uint) +} + +// UintSlice gets the value as a []uint, returns the optionalDefault +// value or nil if the value is not a []uint. +func (v *Value) UintSlice(optionalDefault ...[]uint) []uint { + if s, ok := v.data.([]uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintSlice gets the value as a []uint. +// +// Panics if the object is not a []uint. +func (v *Value) MustUintSlice() []uint { + return v.data.([]uint) +} + +// IsUint gets whether the object contained is a uint or not. +func (v *Value) IsUint() bool { + _, ok := v.data.(uint) + return ok +} + +// IsUintSlice gets whether the object contained is a []uint or not. 
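+//
+// For example (an illustrative sketch):
+//
+//	objx.Map{"ids": []uint{1, 2}}.Get("ids").IsUintSlice()  // true
+//	objx.Map{"ids": []int{1, 2}}.Get("ids").IsUintSlice()   // false: []int, not []uint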
+func (v *Value) IsUintSlice() bool { + _, ok := v.data.([]uint) + return ok +} + +// EachUint calls the specified callback for each object +// in the []uint. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint(callback func(int, uint) bool) *Value { + for index, val := range v.MustUintSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint uses the specified decider function to select items +// from the []uint. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint(decider func(int, uint) bool) *Value { + var selected []uint + v.EachUint(func(index int, val uint) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint. +func (v *Value) GroupUint(grouper func(int, uint) string) *Value { + groups := make(map[string][]uint) + v.EachUint(func(index int, val uint) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint uses the specified function to replace each uints +// by iterating each item. The data in the returned result will be a +// []uint containing the replaced items. +func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { + arr := v.MustUintSlice() + replaced := make([]uint, len(arr)) + v.EachUint(func(index int, val uint) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint uses the specified collector function to collect a value +// for each of the uints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { + arr := v.MustUintSlice() + collected := make([]interface{}, len(arr)) + v.EachUint(func(index int, val uint) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint8 (uint8 and []uint8) +*/ + +// Uint8 gets the value as a uint8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint8(optionalDefault ...uint8) uint8 { + if s, ok := v.data.(uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint8 gets the value as a uint8. +// +// Panics if the object is not a uint8. +func (v *Value) MustUint8() uint8 { + return v.data.(uint8) +} + +// Uint8Slice gets the value as a []uint8, returns the optionalDefault +// value or nil if the value is not a []uint8. +func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 { + if s, ok := v.data.([]uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint8Slice gets the value as a []uint8. +// +// Panics if the object is not a []uint8. +func (v *Value) MustUint8Slice() []uint8 { + return v.data.([]uint8) +} + +// IsUint8 gets whether the object contained is a uint8 or not. +func (v *Value) IsUint8() bool { + _, ok := v.data.(uint8) + return ok +} + +// IsUint8Slice gets whether the object contained is a []uint8 or not. 
+func (v *Value) IsUint8Slice() bool { + _, ok := v.data.([]uint8) + return ok +} + +// EachUint8 calls the specified callback for each object +// in the []uint8. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { + for index, val := range v.MustUint8Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint8 uses the specified decider function to select items +// from the []uint8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { + var selected []uint8 + v.EachUint8(func(index int, val uint8) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint8. +func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { + groups := make(map[string][]uint8) + v.EachUint8(func(index int, val uint8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint8 uses the specified function to replace each uint8s +// by iterating each item. The data in the returned result will be a +// []uint8 containing the replaced items. +func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { + arr := v.MustUint8Slice() + replaced := make([]uint8, len(arr)) + v.EachUint8(func(index int, val uint8) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint8 uses the specified collector function to collect a value +// for each of the uint8s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { + arr := v.MustUint8Slice() + collected := make([]interface{}, len(arr)) + v.EachUint8(func(index int, val uint8) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint16 (uint16 and []uint16) +*/ + +// Uint16 gets the value as a uint16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint16(optionalDefault ...uint16) uint16 { + if s, ok := v.data.(uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint16 gets the value as a uint16. +// +// Panics if the object is not a uint16. +func (v *Value) MustUint16() uint16 { + return v.data.(uint16) +} + +// Uint16Slice gets the value as a []uint16, returns the optionalDefault +// value or nil if the value is not a []uint16. +func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 { + if s, ok := v.data.([]uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint16Slice gets the value as a []uint16. +// +// Panics if the object is not a []uint16. +func (v *Value) MustUint16Slice() []uint16 { + return v.data.([]uint16) +} + +// IsUint16 gets whether the object contained is a uint16 or not. 
+func (v *Value) IsUint16() bool { + _, ok := v.data.(uint16) + return ok +} + +// IsUint16Slice gets whether the object contained is a []uint16 or not. +func (v *Value) IsUint16Slice() bool { + _, ok := v.data.([]uint16) + return ok +} + +// EachUint16 calls the specified callback for each object +// in the []uint16. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { + for index, val := range v.MustUint16Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint16 uses the specified decider function to select items +// from the []uint16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { + var selected []uint16 + v.EachUint16(func(index int, val uint16) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint16. +func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { + groups := make(map[string][]uint16) + v.EachUint16(func(index int, val uint16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint16 uses the specified function to replace each uint16s +// by iterating each item. The data in the returned result will be a +// []uint16 containing the replaced items. +func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { + arr := v.MustUint16Slice() + replaced := make([]uint16, len(arr)) + v.EachUint16(func(index int, val uint16) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint16 uses the specified collector function to collect a value +// for each of the uint16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { + arr := v.MustUint16Slice() + collected := make([]interface{}, len(arr)) + v.EachUint16(func(index int, val uint16) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint32 (uint32 and []uint32) +*/ + +// Uint32 gets the value as a uint32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint32(optionalDefault ...uint32) uint32 { + if s, ok := v.data.(uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint32 gets the value as a uint32. +// +// Panics if the object is not a uint32. +func (v *Value) MustUint32() uint32 { + return v.data.(uint32) +} + +// Uint32Slice gets the value as a []uint32, returns the optionalDefault +// value or nil if the value is not a []uint32. +func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 { + if s, ok := v.data.([]uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint32Slice gets the value as a []uint32. +// +// Panics if the object is not a []uint32. 
+func (v *Value) MustUint32Slice() []uint32 { + return v.data.([]uint32) +} + +// IsUint32 gets whether the object contained is a uint32 or not. +func (v *Value) IsUint32() bool { + _, ok := v.data.(uint32) + return ok +} + +// IsUint32Slice gets whether the object contained is a []uint32 or not. +func (v *Value) IsUint32Slice() bool { + _, ok := v.data.([]uint32) + return ok +} + +// EachUint32 calls the specified callback for each object +// in the []uint32. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { + for index, val := range v.MustUint32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint32 uses the specified decider function to select items +// from the []uint32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { + var selected []uint32 + v.EachUint32(func(index int, val uint32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint32. +func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { + groups := make(map[string][]uint32) + v.EachUint32(func(index int, val uint32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint32 uses the specified function to replace each uint32s +// by iterating each item. The data in the returned result will be a +// []uint32 containing the replaced items. +func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { + arr := v.MustUint32Slice() + replaced := make([]uint32, len(arr)) + v.EachUint32(func(index int, val uint32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint32 uses the specified collector function to collect a value +// for each of the uint32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { + arr := v.MustUint32Slice() + collected := make([]interface{}, len(arr)) + v.EachUint32(func(index int, val uint32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint64 (uint64 and []uint64) +*/ + +// Uint64 gets the value as a uint64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint64(optionalDefault ...uint64) uint64 { + if s, ok := v.data.(uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint64 gets the value as a uint64. +// +// Panics if the object is not a uint64. +func (v *Value) MustUint64() uint64 { + return v.data.(uint64) +} + +// Uint64Slice gets the value as a []uint64, returns the optionalDefault +// value or nil if the value is not a []uint64. 
+func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 { + if s, ok := v.data.([]uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint64Slice gets the value as a []uint64. +// +// Panics if the object is not a []uint64. +func (v *Value) MustUint64Slice() []uint64 { + return v.data.([]uint64) +} + +// IsUint64 gets whether the object contained is a uint64 or not. +func (v *Value) IsUint64() bool { + _, ok := v.data.(uint64) + return ok +} + +// IsUint64Slice gets whether the object contained is a []uint64 or not. +func (v *Value) IsUint64Slice() bool { + _, ok := v.data.([]uint64) + return ok +} + +// EachUint64 calls the specified callback for each object +// in the []uint64. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { + for index, val := range v.MustUint64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint64 uses the specified decider function to select items +// from the []uint64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { + var selected []uint64 + v.EachUint64(func(index int, val uint64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint64. +func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { + groups := make(map[string][]uint64) + v.EachUint64(func(index int, val uint64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint64 uses the specified function to replace each uint64s +// by iterating each item. The data in the returned result will be a +// []uint64 containing the replaced items. +func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { + arr := v.MustUint64Slice() + replaced := make([]uint64, len(arr)) + v.EachUint64(func(index int, val uint64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint64 uses the specified collector function to collect a value +// for each of the uint64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { + arr := v.MustUint64Slice() + collected := make([]interface{}, len(arr)) + v.EachUint64(func(index int, val uint64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uintptr (uintptr and []uintptr) +*/ + +// Uintptr gets the value as a uintptr, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr { + if s, ok := v.data.(uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUintptr gets the value as a uintptr. +// +// Panics if the object is not a uintptr. 
+func (v *Value) MustUintptr() uintptr { + return v.data.(uintptr) +} + +// UintptrSlice gets the value as a []uintptr, returns the optionalDefault +// value or nil if the value is not a []uintptr. +func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr { + if s, ok := v.data.([]uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintptrSlice gets the value as a []uintptr. +// +// Panics if the object is not a []uintptr. +func (v *Value) MustUintptrSlice() []uintptr { + return v.data.([]uintptr) +} + +// IsUintptr gets whether the object contained is a uintptr or not. +func (v *Value) IsUintptr() bool { + _, ok := v.data.(uintptr) + return ok +} + +// IsUintptrSlice gets whether the object contained is a []uintptr or not. +func (v *Value) IsUintptrSlice() bool { + _, ok := v.data.([]uintptr) + return ok +} + +// EachUintptr calls the specified callback for each object +// in the []uintptr. +// +// Panics if the object is the wrong type. +func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { + for index, val := range v.MustUintptrSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUintptr uses the specified decider function to select items +// from the []uintptr. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { + var selected []uintptr + v.EachUintptr(func(index int, val uintptr) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUintptr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uintptr. +func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { + groups := make(map[string][]uintptr) + v.EachUintptr(func(index int, val uintptr) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uintptr, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUintptr uses the specified function to replace each uintptrs +// by iterating each item. The data in the returned result will be a +// []uintptr containing the replaced items. +func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { + arr := v.MustUintptrSlice() + replaced := make([]uintptr, len(arr)) + v.EachUintptr(func(index int, val uintptr) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUintptr uses the specified collector function to collect a value +// for each of the uintptrs in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { + arr := v.MustUintptrSlice() + collected := make([]interface{}, len(arr)) + v.EachUintptr(func(index int, val uintptr) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Float32 (float32 and []float32) +*/ + +// Float32 gets the value as a float32, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
+func (v *Value) Float32(optionalDefault ...float32) float32 { + if s, ok := v.data.(float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat32 gets the value as a float32. +// +// Panics if the object is not a float32. +func (v *Value) MustFloat32() float32 { + return v.data.(float32) +} + +// Float32Slice gets the value as a []float32, returns the optionalDefault +// value or nil if the value is not a []float32. +func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 { + if s, ok := v.data.([]float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat32Slice gets the value as a []float32. +// +// Panics if the object is not a []float32. +func (v *Value) MustFloat32Slice() []float32 { + return v.data.([]float32) +} + +// IsFloat32 gets whether the object contained is a float32 or not. +func (v *Value) IsFloat32() bool { + _, ok := v.data.(float32) + return ok +} + +// IsFloat32Slice gets whether the object contained is a []float32 or not. +func (v *Value) IsFloat32Slice() bool { + _, ok := v.data.([]float32) + return ok +} + +// EachFloat32 calls the specified callback for each object +// in the []float32. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { + for index, val := range v.MustFloat32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereFloat32 uses the specified decider function to select items +// from the []float32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { + var selected []float32 + v.EachFloat32(func(index int, val float32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupFloat32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float32. +func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { + groups := make(map[string][]float32) + v.EachFloat32(func(index int, val float32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceFloat32 uses the specified function to replace each float32s +// by iterating each item. The data in the returned result will be a +// []float32 containing the replaced items. +func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { + arr := v.MustFloat32Slice() + replaced := make([]float32, len(arr)) + v.EachFloat32(func(index int, val float32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectFloat32 uses the specified collector function to collect a value +// for each of the float32s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { + arr := v.MustFloat32Slice() + collected := make([]interface{}, len(arr)) + v.EachFloat32(func(index int, val float32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Float64 (float64 and []float64) +*/ + +// Float64 gets the value as a float64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float64(optionalDefault ...float64) float64 { + if s, ok := v.data.(float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat64 gets the value as a float64. +// +// Panics if the object is not a float64. +func (v *Value) MustFloat64() float64 { + return v.data.(float64) +} + +// Float64Slice gets the value as a []float64, returns the optionalDefault +// value or nil if the value is not a []float64. +func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 { + if s, ok := v.data.([]float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat64Slice gets the value as a []float64. +// +// Panics if the object is not a []float64. +func (v *Value) MustFloat64Slice() []float64 { + return v.data.([]float64) +} + +// IsFloat64 gets whether the object contained is a float64 or not. +func (v *Value) IsFloat64() bool { + _, ok := v.data.(float64) + return ok +} + +// IsFloat64Slice gets whether the object contained is a []float64 or not. +func (v *Value) IsFloat64Slice() bool { + _, ok := v.data.([]float64) + return ok +} + +// EachFloat64 calls the specified callback for each object +// in the []float64. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { + for index, val := range v.MustFloat64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereFloat64 uses the specified decider function to select items +// from the []float64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { + var selected []float64 + v.EachFloat64(func(index int, val float64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupFloat64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float64. +func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { + groups := make(map[string][]float64) + v.EachFloat64(func(index int, val float64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceFloat64 uses the specified function to replace each float64s +// by iterating each item. The data in the returned result will be a +// []float64 containing the replaced items. 
+func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { + arr := v.MustFloat64Slice() + replaced := make([]float64, len(arr)) + v.EachFloat64(func(index int, val float64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectFloat64 uses the specified collector function to collect a value +// for each of the float64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { + arr := v.MustFloat64Slice() + collected := make([]interface{}, len(arr)) + v.EachFloat64(func(index int, val float64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Complex64 (complex64 and []complex64) +*/ + +// Complex64 gets the value as a complex64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex64(optionalDefault ...complex64) complex64 { + if s, ok := v.data.(complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex64 gets the value as a complex64. +// +// Panics if the object is not a complex64. +func (v *Value) MustComplex64() complex64 { + return v.data.(complex64) +} + +// Complex64Slice gets the value as a []complex64, returns the optionalDefault +// value or nil if the value is not a []complex64. +func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 { + if s, ok := v.data.([]complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex64Slice gets the value as a []complex64. +// +// Panics if the object is not a []complex64. +func (v *Value) MustComplex64Slice() []complex64 { + return v.data.([]complex64) +} + +// IsComplex64 gets whether the object contained is a complex64 or not. +func (v *Value) IsComplex64() bool { + _, ok := v.data.(complex64) + return ok +} + +// IsComplex64Slice gets whether the object contained is a []complex64 or not. +func (v *Value) IsComplex64Slice() bool { + _, ok := v.data.([]complex64) + return ok +} + +// EachComplex64 calls the specified callback for each object +// in the []complex64. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { + for index, val := range v.MustComplex64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereComplex64 uses the specified decider function to select items +// from the []complex64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { + var selected []complex64 + v.EachComplex64(func(index int, val complex64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupComplex64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex64. 
+func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { + groups := make(map[string][]complex64) + v.EachComplex64(func(index int, val complex64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceComplex64 uses the specified function to replace each complex64s +// by iterating each item. The data in the returned result will be a +// []complex64 containing the replaced items. +func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { + arr := v.MustComplex64Slice() + replaced := make([]complex64, len(arr)) + v.EachComplex64(func(index int, val complex64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectComplex64 uses the specified collector function to collect a value +// for each of the complex64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { + arr := v.MustComplex64Slice() + collected := make([]interface{}, len(arr)) + v.EachComplex64(func(index int, val complex64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Complex128 (complex128 and []complex128) +*/ + +// Complex128 gets the value as a complex128, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex128(optionalDefault ...complex128) complex128 { + if s, ok := v.data.(complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex128 gets the value as a complex128. +// +// Panics if the object is not a complex128. +func (v *Value) MustComplex128() complex128 { + return v.data.(complex128) +} + +// Complex128Slice gets the value as a []complex128, returns the optionalDefault +// value or nil if the value is not a []complex128. +func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 { + if s, ok := v.data.([]complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex128Slice gets the value as a []complex128. +// +// Panics if the object is not a []complex128. +func (v *Value) MustComplex128Slice() []complex128 { + return v.data.([]complex128) +} + +// IsComplex128 gets whether the object contained is a complex128 or not. +func (v *Value) IsComplex128() bool { + _, ok := v.data.(complex128) + return ok +} + +// IsComplex128Slice gets whether the object contained is a []complex128 or not. +func (v *Value) IsComplex128Slice() bool { + _, ok := v.data.([]complex128) + return ok +} + +// EachComplex128 calls the specified callback for each object +// in the []complex128. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { + for index, val := range v.MustComplex128Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereComplex128 uses the specified decider function to select items +// from the []complex128. The object contained in the result will contain +// only the selected items. 
+func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { + var selected []complex128 + v.EachComplex128(func(index int, val complex128) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupComplex128 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex128. +func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { + groups := make(map[string][]complex128) + v.EachComplex128(func(index int, val complex128) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex128, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceComplex128 uses the specified function to replace each complex128s +// by iterating each item. The data in the returned result will be a +// []complex128 containing the replaced items. +func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { + arr := v.MustComplex128Slice() + replaced := make([]complex128, len(arr)) + v.EachComplex128(func(index int, val complex128) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectComplex128 uses the specified collector function to collect a value +// for each of the complex128s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { + arr := v.MustComplex128Slice() + collected := make([]interface{}, len(arr)) + v.EachComplex128(func(index int, val complex128) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} diff --git a/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/stretchr/objx/value.go new file mode 100644 index 000000000..4e5f9b77e --- /dev/null +++ b/vendor/github.com/stretchr/objx/value.go @@ -0,0 +1,159 @@ +package objx + +import ( + "fmt" + "strconv" +) + +// Value provides methods for extracting interface{} data in various +// types. 
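The Value type defined just below exposes String and StringSlice, which coerce whatever scalar or typed slice the Value holds into strings via strconv instead of panicking; unconvertible data falls back to fmt formatting (String) or the optional default (StringSlice). A hedged sketch:

    package main

    import (
    	"fmt"

    	"github.com/stretchr/objx"
    )

    func main() {
    	fmt.Println(objx.Map{"f": 3.5}.Get("f").String())  // "3.5"
    	fmt.Println(objx.Map{"b": true}.Get("b").String()) // "true"

    	// Typed numeric slices are converted element by element.
    	fmt.Println(objx.Map{"ns": []int64{1, 2}}.Get("ns").StringSlice()) // [1 2]

    	// An unconvertible slice type falls back to the optional default.
    	fmt.Println(objx.Map{"x": struct{}{}}.Get("x").StringSlice([]string{"fallback"}))
    }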
+type Value struct { + // data contains the raw data being managed by this Value + data interface{} +} + +// Data returns the raw data contained by this Value +func (v *Value) Data() interface{} { + return v.data +} + +// String returns the value always as a string +func (v *Value) String() string { + switch { + case v.IsNil(): + return "" + case v.IsStr(): + return v.Str() + case v.IsBool(): + return strconv.FormatBool(v.Bool()) + case v.IsFloat32(): + return strconv.FormatFloat(float64(v.Float32()), 'f', -1, 32) + case v.IsFloat64(): + return strconv.FormatFloat(v.Float64(), 'f', -1, 64) + case v.IsInt(): + return strconv.FormatInt(int64(v.Int()), 10) + case v.IsInt8(): + return strconv.FormatInt(int64(v.Int8()), 10) + case v.IsInt16(): + return strconv.FormatInt(int64(v.Int16()), 10) + case v.IsInt32(): + return strconv.FormatInt(int64(v.Int32()), 10) + case v.IsInt64(): + return strconv.FormatInt(v.Int64(), 10) + case v.IsUint(): + return strconv.FormatUint(uint64(v.Uint()), 10) + case v.IsUint8(): + return strconv.FormatUint(uint64(v.Uint8()), 10) + case v.IsUint16(): + return strconv.FormatUint(uint64(v.Uint16()), 10) + case v.IsUint32(): + return strconv.FormatUint(uint64(v.Uint32()), 10) + case v.IsUint64(): + return strconv.FormatUint(v.Uint64(), 10) + } + return fmt.Sprintf("%#v", v.Data()) +} + +// StringSlice returns the value always as a []string +func (v *Value) StringSlice(optionalDefault ...[]string) []string { + switch { + case v.IsStrSlice(): + return v.MustStrSlice() + case v.IsBoolSlice(): + slice := v.MustBoolSlice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatBool(iv) + } + return vals + case v.IsFloat32Slice(): + slice := v.MustFloat32Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatFloat(float64(iv), 'f', -1, 32) + } + return vals + case v.IsFloat64Slice(): + slice := v.MustFloat64Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatFloat(iv, 'f', -1, 64) + } + return vals + case v.IsIntSlice(): + slice := v.MustIntSlice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(int64(iv), 10) + } + return vals + case v.IsInt8Slice(): + slice := v.MustInt8Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(int64(iv), 10) + } + return vals + case v.IsInt16Slice(): + slice := v.MustInt16Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(int64(iv), 10) + } + return vals + case v.IsInt32Slice(): + slice := v.MustInt32Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(int64(iv), 10) + } + return vals + case v.IsInt64Slice(): + slice := v.MustInt64Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(iv, 10) + } + return vals + case v.IsUintSlice(): + slice := v.MustUintSlice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(uint64(iv), 10) + } + return vals + case v.IsUint8Slice(): + slice := v.MustUint8Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(uint64(iv), 10) + } + return vals + case v.IsUint16Slice(): + slice := v.MustUint16Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(uint64(iv), 10) + } + return vals + case 
v.IsUint32Slice(): + slice := v.MustUint32Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(uint64(iv), 10) + } + return vals + case v.IsUint64Slice(): + slice := v.MustUint64Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(iv, 10) + } + return vals + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + + return []string{} +} diff --git a/vendor/github.com/stretchr/testify/mock/doc.go b/vendor/github.com/stretchr/testify/mock/doc.go new file mode 100644 index 000000000..d6b3c844c --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/doc.go @@ -0,0 +1,44 @@ +// Package mock provides a system by which it is possible to mock your objects +// and verify calls are happening as expected. +// +// # Example Usage +// +// The mock package provides an object, Mock, that tracks activity on another object. It is usually +// embedded into a test object as shown below: +// +// type MyTestObject struct { +// // add a Mock object instance +// mock.Mock +// +// // other fields go here as normal +// } +// +// When implementing the methods of an interface, you wire your functions up +// to call the Mock.Called(args...) method, and return the appropriate values. +// +// For example, to mock a method that saves the name and age of a person and returns +// the year of their birth or an error, you might write this: +// +// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { +// args := o.Called(firstname, lastname, age) +// return args.Int(0), args.Error(1) +// } +// +// The Int, Error and Bool methods are examples of strongly typed getters that take the argument +// index position. Given this argument list: +// +// (12, true, "Something") +// +// You could read them out strongly typed like this: +// +// args.Int(0) +// args.Bool(1) +// args.String(2) +// +// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: +// +// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) +// +// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those +// cases you should check for nil first. +package mock diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go new file mode 100644 index 000000000..213bde2ea --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -0,0 +1,1241 @@ +package mock + +import ( + "errors" + "fmt" + "path" + "reflect" + "regexp" + "runtime" + "strings" + "sync" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" + "github.com/stretchr/objx" + + "github.com/stretchr/testify/assert" +) + +// regex for GCCGO functions +var gccgoRE = regexp.MustCompile(`\.pN\d+_`) + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Logf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + FailNow() +} + +/* + Call +*/ + +// Call represents a method call and is used for setting expectations, +// as well as recording activity. +type Call struct { + Parent *Mock + + // The name of the method that was or will be called. + Method string + + // Holds the arguments of the method. + Arguments Arguments + + // Holds the arguments that should be returned when + // this method is called. 
+ ReturnArguments Arguments + + // Holds the caller info for the On() call + callerInfo []string + + // The number of times to return the return arguments when setting + // expectations. 0 means to always return the value. + Repeatability int + + // Amount of times this call has been called + totalCalls int + + // Call to this method can be optional + optional bool + + // Holds a channel that will be used to block the Return until it either + // receives a message or is closed. nil means it returns immediately. + WaitFor <-chan time.Time + + waitTime time.Duration + + // Holds a handler used to manipulate arguments content that are passed by + // reference. It's useful when mocking methods such as unmarshalers or + // decoders. + RunFn func(Arguments) + + // PanicMsg holds msg to be used to mock panic on the function call + // if the PanicMsg is set to a non nil string the function call will panic + // irrespective of other settings + PanicMsg *string + + // Calls which must be satisfied before this call can be + requires []*Call +} + +func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call { + return &Call{ + Parent: parent, + Method: methodName, + Arguments: methodArguments, + ReturnArguments: make([]interface{}, 0), + callerInfo: callerInfo, + Repeatability: 0, + WaitFor: nil, + RunFn: nil, + PanicMsg: nil, + } +} + +func (c *Call) lock() { + c.Parent.mutex.Lock() +} + +func (c *Call) unlock() { + c.Parent.mutex.Unlock() +} + +// Return specifies the return arguments for the expectation. +// +// Mock.On("DoSomething").Return(errors.New("failed")) +func (c *Call) Return(returnArguments ...interface{}) *Call { + c.lock() + defer c.unlock() + + c.ReturnArguments = returnArguments + + return c +} + +// Panic specifies if the function call should fail and the panic message +// +// Mock.On("DoSomething").Panic("test panic") +func (c *Call) Panic(msg string) *Call { + c.lock() + defer c.unlock() + + c.PanicMsg = &msg + + return c +} + +// Once indicates that the mock should only return the value once. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() +func (c *Call) Once() *Call { + return c.Times(1) +} + +// Twice indicates that the mock should only return the value twice. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() +func (c *Call) Twice() *Call { + return c.Times(2) +} + +// Times indicates that the mock should only return the indicated number +// of times. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) +func (c *Call) Times(i int) *Call { + c.lock() + defer c.unlock() + c.Repeatability = i + return c +} + +// WaitUntil sets the channel that will block the mock's return until its closed +// or a message is received. +// +// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) +func (c *Call) WaitUntil(w <-chan time.Time) *Call { + c.lock() + defer c.unlock() + c.WaitFor = w + return c +} + +// After sets how long to block until the call returns +// +// Mock.On("MyMethod", arg1, arg2).After(time.Second) +func (c *Call) After(d time.Duration) *Call { + c.lock() + defer c.unlock() + c.waitTime = d + return c +} + +// Run sets a handler to be called before returning. 
It can be used when +// mocking a method (such as an unmarshaler) that takes a pointer to a struct and +// sets properties in such struct +// +// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}")).Return().Run(func(args Arguments) { +// arg := args.Get(0).(*map[string]interface{}) +// arg["foo"] = "bar" +// }) +func (c *Call) Run(fn func(args Arguments)) *Call { + c.lock() + defer c.unlock() + c.RunFn = fn + return c +} + +// Maybe allows the method call to be optional. Not calling an optional method +// will not cause an error while asserting expectations +func (c *Call) Maybe() *Call { + c.lock() + defer c.unlock() + c.optional = true + return c +} + +// On chains a new expectation description onto the mocked interface. This +// allows syntax like. +// +// Mock. +// On("MyMethod", 1).Return(nil). +// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error")) +// +//go:noinline +func (c *Call) On(methodName string, arguments ...interface{}) *Call { + return c.Parent.On(methodName, arguments...) +} + +// Unset removes a mock handler from being called. +// +// test.On("func", mock.Anything).Unset() +func (c *Call) Unset() *Call { + var unlockOnce sync.Once + + for _, arg := range c.Arguments { + if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { + panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg)) + } + } + + c.lock() + defer unlockOnce.Do(c.unlock) + + foundMatchingCall := false + + // in-place filter slice for calls to be removed - iterate from 0'th to last skipping unnecessary ones + var index int // write index + for _, call := range c.Parent.ExpectedCalls { + if call.Method == c.Method { + _, diffCount := call.Arguments.Diff(c.Arguments) + if diffCount == 0 { + foundMatchingCall = true + // Remove from ExpectedCalls - just skip it + continue + } + } + c.Parent.ExpectedCalls[index] = call + index++ + } + // trim slice up to last copied index + c.Parent.ExpectedCalls = c.Parent.ExpectedCalls[:index] + + if !foundMatchingCall { + unlockOnce.Do(c.unlock) + c.Parent.fail("\n\nmock: Could not find expected call\n-----------------------------\n\n%s\n\n", + callString(c.Method, c.Arguments, true), + ) + } + + return c +} + +// NotBefore indicates that the mock should only be called after the referenced +// calls have been called as expected. The referenced calls may be from the +// same mock instance and/or other mock instances. +// +// Mock.On("Do").Return(nil).Notbefore( +// Mock.On("Init").Return(nil) +// ) +func (c *Call) NotBefore(calls ...*Call) *Call { + c.lock() + defer c.unlock() + + for _, call := range calls { + if call.Parent == nil { + panic("not before calls must be created with Mock.On()") + } + } + + c.requires = append(c.requires, calls...) + return c +} + +// Mock is the workhorse used to track activity on another object. +// For an example of its usage, refer to the "Example Usage" section at the top +// of this document. +type Mock struct { + // Represents the calls that are expected of + // an object. + ExpectedCalls []*Call + + // Holds the calls that were made to this mocked object. + Calls []Call + + // test is An optional variable that holds the test struct, to be used when an + // invalid mock call was made. + test TestingT + + // TestData holds any data that might be useful for testing. Testify ignores + // this data completely allowing you to do whatever you like with it. + testData objx.Map + + mutex sync.Mutex +} + +// String provides a %v format string for Mock. 
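The Call options above compose fluently because each returns *Call. A hedged sketch of a small mock built on them; MockStore, Load, and Flush are hypothetical names, and only the chained methods come from this file:

    package store_test

    import (
    	"testing"
    	"time"

    	"github.com/stretchr/testify/mock"
    )

    // MockStore is a hypothetical mock embedding mock.Mock.
    type MockStore struct{ mock.Mock }

    func (s *MockStore) Load(key string) (string, error) {
    	args := s.Called(key)
    	return args.String(0), args.Error(1)
    }

    func (s *MockStore) Flush() error { return s.Called().Error(0) }

    func TestFluentCalls(t *testing.T) {
    	m := new(MockStore)
    	m.Test(t) // report failures through t instead of panicking

    	// Return "value" for a single Load("key") call.
    	load := m.On("Load", "key").Return("value", nil).Once()

    	// Flush is optional, simulates latency, and must not run before Load.
    	m.On("Flush").
    		Return(nil).
    		Maybe().
    		After(5 * time.Millisecond).
    		NotBefore(load)

    	if v, _ := m.Load("key"); v != "value" {
    		t.Fatalf("got %q", v)
    	}
    	_ = m.Flush()
    	m.AssertExpectations(t)
    }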
+// Note: this is used implicitly by Arguments.Diff if a Mock is passed. +// It exists because go's default %v formatting traverses the struct +// without acquiring the mutex, which is detected by go test -race. +func (m *Mock) String() string { + return fmt.Sprintf("%[1]T<%[1]p>", m) +} + +// TestData holds any data that might be useful for testing. Testify ignores +// this data completely allowing you to do whatever you like with it. +func (m *Mock) TestData() objx.Map { + if m.testData == nil { + m.testData = make(objx.Map) + } + + return m.testData +} + +/* + Setting expectations +*/ + +// Test sets the test struct variable of the mock object +func (m *Mock) Test(t TestingT) { + m.mutex.Lock() + defer m.mutex.Unlock() + m.test = t +} + +// fail fails the current test with the given formatted format and args. +// In case that a test was defined, it uses the test APIs for failing a test, +// otherwise it uses panic. +func (m *Mock) fail(format string, args ...interface{}) { + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.test == nil { + panic(fmt.Sprintf(format, args...)) + } + m.test.Errorf(format, args...) + m.test.FailNow() +} + +// On starts a description of an expectation of the specified method +// being called. +// +// Mock.On("MyMethod", arg1, arg2) +func (m *Mock) On(methodName string, arguments ...interface{}) *Call { + for _, arg := range arguments { + if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { + panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg)) + } + } + + m.mutex.Lock() + defer m.mutex.Unlock() + c := newCall(m, methodName, assert.CallerInfo(), arguments...) + m.ExpectedCalls = append(m.ExpectedCalls, c) + return c +} + +// /* +// Recording and responding to activity +// */ + +func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { + var expectedCall *Call + + for i, call := range m.ExpectedCalls { + if call.Method == method { + _, diffCount := call.Arguments.Diff(arguments) + if diffCount == 0 { + expectedCall = call + if call.Repeatability > -1 { + return i, call + } + } + } + } + + return -1, expectedCall +} + +type matchCandidate struct { + call *Call + mismatch string + diffCount int +} + +func (c matchCandidate) isBetterMatchThan(other matchCandidate) bool { + if c.call == nil { + return false + } + if other.call == nil { + return true + } + + if c.diffCount > other.diffCount { + return false + } + if c.diffCount < other.diffCount { + return true + } + + if c.call.Repeatability > 0 && other.call.Repeatability <= 0 { + return true + } + return false +} + +func (m *Mock) findClosestCall(method string, arguments ...interface{}) (*Call, string) { + var bestMatch matchCandidate + + for _, call := range m.expectedCalls() { + if call.Method == method { + + errInfo, tempDiffCount := call.Arguments.Diff(arguments) + tempCandidate := matchCandidate{ + call: call, + mismatch: errInfo, + diffCount: tempDiffCount, + } + if tempCandidate.isBetterMatchThan(bestMatch) { + bestMatch = tempCandidate + } + } + } + + return bestMatch.call, bestMatch.mismatch +} + +func callString(method string, arguments Arguments, includeArgumentValues bool) string { + var argValsString string + if includeArgumentValues { + var argVals []string + for argIndex, arg := range arguments { + if _, ok := arg.(*FunctionalOptionsArgument); ok { + argVals = append(argVals, fmt.Sprintf("%d: %s", argIndex, arg)) + continue + } + argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg)) + } + argValsString = 
fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) + } + + return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString) +} + +// Called tells the mock object that a method has been called, and gets an array +// of arguments to return. Panics if the call is unexpected (i.e. not preceded by +// appropriate .On .Return() calls) +// If Call.WaitFor is set, blocks until the channel is closed or receives a message. +func (m *Mock) Called(arguments ...interface{}) Arguments { + // get the calling function's name + pc, _, _, ok := runtime.Caller(1) + if !ok { + panic("Couldn't get the caller information") + } + functionPath := runtime.FuncForPC(pc).Name() + // Next four lines are required to use GCCGO function naming conventions. + // For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock + // uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree + // With GCCGO we need to remove interface information starting from pN
. + if gccgoRE.MatchString(functionPath) { + functionPath = gccgoRE.Split(functionPath, -1)[0] + } + parts := strings.Split(functionPath, ".") + functionName := parts[len(parts)-1] + return m.MethodCalled(functionName, arguments...) +} + +// MethodCalled tells the mock object that the given method has been called, and gets +// an array of arguments to return. Panics if the call is unexpected (i.e. not preceded +// by appropriate .On .Return() calls) +// If Call.WaitFor is set, blocks until the channel is closed or receives a message. +func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Arguments { + m.mutex.Lock() + // TODO: could combine expected and closes in single loop + found, call := m.findExpectedCall(methodName, arguments...) + + if found < 0 { + // expected call found, but it has already been called with repeatable times + if call != nil { + m.mutex.Unlock() + m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(\"%s\").Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo()) + } + // we have to fail here - because we don't know what to do + // as the return arguments. This is because: + // + // a) this is a totally unexpected call to this method, + // b) the arguments are not what was expected, or + // c) the developer has forgotten to add an accompanying On...Return pair. + closestCall, mismatch := m.findClosestCall(methodName, arguments...) + m.mutex.Unlock() + + if closestCall != nil { + m.fail("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\nDiff: %s", + callString(methodName, arguments, true), + callString(methodName, closestCall.Arguments, true), + diffArguments(closestCall.Arguments, arguments), + strings.TrimSpace(mismatch), + ) + } else { + m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) 
first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo()) + } + } + + for _, requirement := range call.requires { + if satisfied, _ := requirement.Parent.checkExpectation(requirement); !satisfied { + m.mutex.Unlock() + m.fail("mock: Unexpected Method Call\n-----------------------------\n\n%s\n\nMust not be called before%s:\n\n%s", + callString(call.Method, call.Arguments, true), + func() (s string) { + if requirement.totalCalls > 0 { + s = " another call of" + } + if call.Parent != requirement.Parent { + s += " method from another mock instance" + } + return + }(), + callString(requirement.Method, requirement.Arguments, true), + ) + } + } + + if call.Repeatability == 1 { + call.Repeatability = -1 + } else if call.Repeatability > 1 { + call.Repeatability-- + } + call.totalCalls++ + + // add the call + m.Calls = append(m.Calls, *newCall(m, methodName, assert.CallerInfo(), arguments...)) + m.mutex.Unlock() + + // block if specified + if call.WaitFor != nil { + <-call.WaitFor + } else { + time.Sleep(call.waitTime) + } + + m.mutex.Lock() + panicMsg := call.PanicMsg + m.mutex.Unlock() + if panicMsg != nil { + panic(*panicMsg) + } + + m.mutex.Lock() + runFn := call.RunFn + m.mutex.Unlock() + + if runFn != nil { + runFn(arguments) + } + + m.mutex.Lock() + returnArgs := call.ReturnArguments + m.mutex.Unlock() + + return returnArgs +} + +/* + Assertions +*/ + +type assertExpectationiser interface { + AssertExpectations(TestingT) bool +} + +// AssertExpectationsForObjects asserts that everything specified with On and Return +// of the specified objects was in fact called as expected. +// +// Calls may have occurred in any order. +func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + for _, obj := range testObjects { + if m, ok := obj.(*Mock); ok { + t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") + obj = m + } + m := obj.(assertExpectationiser) + if !m.AssertExpectations(t) { + t.Logf("Expectations didn't match for Mock: %+v", reflect.TypeOf(m)) + return false + } + } + return true +} + +// AssertExpectations asserts that everything specified with On and Return was +// in fact called as expected. Calls may have occurred in any order. 
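MethodCalled walks ExpectedCalls in order and consumes the first match whose Repeatability is not exhausted, so stacked expectations for the same method fire in sequence; WaitFor (or the After duration) then gates the return. A hedged sketch using only the exported API:

    package store_test

    import (
    	"testing"
    	"time"

    	"github.com/stretchr/testify/mock"
    )

    func TestExpectationOrder(t *testing.T) {
    	m := new(mock.Mock)
    	m.Test(t)

    	// Stacked expectations: the Once() entry is consumed first.
    	m.On("Ping").Return("pong").Once()
    	m.On("Ping").Return("again")

    	if got := m.MethodCalled("Ping").String(0); got != "pong" {
    		t.Fatalf("first call: %q", got)
    	}
    	if got := m.MethodCalled("Ping").String(0); got != "again" {
    		t.Fatalf("second call: %q", got)
    	}

    	// WaitUntil blocks the return until the channel fires.
    	m.On("Slow").Return(42).WaitUntil(time.After(10 * time.Millisecond))
    	start := time.Now()
    	_ = m.MethodCalled("Slow").Int(0)
    	if time.Since(start) < 10*time.Millisecond {
    		t.Fatal("returned before the gate fired")
    	}
    }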
+func (m *Mock) AssertExpectations(t TestingT) bool { + if s, ok := t.(interface{ Skipped() bool }); ok && s.Skipped() { + return true + } + if h, ok := t.(tHelper); ok { + h.Helper() + } + + m.mutex.Lock() + defer m.mutex.Unlock() + var failedExpectations int + + // iterate through each expectation + expectedCalls := m.expectedCalls() + for _, expectedCall := range expectedCalls { + satisfied, reason := m.checkExpectation(expectedCall) + if !satisfied { + failedExpectations++ + t.Logf(reason) + } + } + + if failedExpectations != 0 { + t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo()) + } + + return failedExpectations == 0 +} + +func (m *Mock) checkExpectation(call *Call) (bool, string) { + if !call.optional && !m.methodWasCalled(call.Method, call.Arguments) && call.totalCalls == 0 { + return false, fmt.Sprintf("FAIL:\t%s(%s)\n\t\tat: %s", call.Method, call.Arguments.String(), call.callerInfo) + } + if call.Repeatability > 0 { + return false, fmt.Sprintf("FAIL:\t%s(%s)\n\t\tat: %s", call.Method, call.Arguments.String(), call.callerInfo) + } + return true, fmt.Sprintf("PASS:\t%s(%s)", call.Method, call.Arguments.String()) +} + +// AssertNumberOfCalls asserts that the method was called expectedCalls times. +func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + var actualCalls int + for _, call := range m.calls() { + if call.Method == methodName { + actualCalls++ + } + } + return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) +} + +// AssertCalled asserts that the method was called. +// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. +func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + if !m.methodWasCalled(methodName, arguments) { + var calledWithArgs []string + for _, call := range m.calls() { + calledWithArgs = append(calledWithArgs, fmt.Sprintf("%v", call.Arguments)) + } + if len(calledWithArgs) == 0 { + return assert.Fail(t, "Should have called with given arguments", + fmt.Sprintf("Expected %q to have been called with:\n%v\nbut no actual calls happened", methodName, arguments)) + } + return assert.Fail(t, "Should have called with given arguments", + fmt.Sprintf("Expected %q to have been called with:\n%v\nbut actual calls were:\n %v", methodName, arguments, strings.Join(calledWithArgs, "\n"))) + } + return true +} + +// AssertNotCalled asserts that the method was not called. +// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. 
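+//
+// For example (the method name is illustrative):
+//
+//	m.AssertNotCalled(t, "DoSomething", 123)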
+func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + if m.methodWasCalled(methodName, arguments) { + return assert.Fail(t, "Should not have called with given arguments", + fmt.Sprintf("Expected %q to not have been called with:\n%v\nbut actually it was.", methodName, arguments)) + } + return true +} + +// IsMethodCallable checking that the method can be called +// If the method was called more than `Repeatability` return false +func (m *Mock) IsMethodCallable(t TestingT, methodName string, arguments ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + + for _, v := range m.ExpectedCalls { + if v.Method != methodName { + continue + } + if len(arguments) != len(v.Arguments) { + continue + } + if v.Repeatability < v.totalCalls { + continue + } + if isArgsEqual(v.Arguments, arguments) { + return true + } + } + return false +} + +// isArgsEqual compares arguments +func isArgsEqual(expected Arguments, args []interface{}) bool { + if len(expected) != len(args) { + return false + } + for i, v := range args { + if !reflect.DeepEqual(expected[i], v) { + return false + } + } + return true +} + +func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { + for _, call := range m.calls() { + if call.Method == methodName { + + _, differences := Arguments(expected).Diff(call.Arguments) + + if differences == 0 { + // found the expected call + return true + } + + } + } + // we didn't find the expected call + return false +} + +func (m *Mock) expectedCalls() []*Call { + return append([]*Call{}, m.ExpectedCalls...) +} + +func (m *Mock) calls() []Call { + return append([]Call{}, m.Calls...) +} + +/* + Arguments +*/ + +// Arguments holds an array of method arguments or return values. +type Arguments []interface{} + +const ( + // Anything is used in Diff and Assert when the argument being tested + // shouldn't be taken into consideration. + Anything = "mock.Anything" +) + +// AnythingOfTypeArgument contains the type of an argument +// for use when type checking. Used in Diff and Assert. +// +// Deprecated: this is an implementation detail that must not be used. Use [AnythingOfType] instead. +type AnythingOfTypeArgument = anythingOfTypeArgument + +// anythingOfTypeArgument is a string that contains the type of an argument +// for use when type checking. Used in Diff and Assert. +type anythingOfTypeArgument string + +// AnythingOfType returns a special value containing the +// name of the type to check for. The type name will be matched against the type name returned by [reflect.Type.String]. +// +// Used in Diff and Assert. +// +// For example: +// +// Assert(t, AnythingOfType("string"), AnythingOfType("int")) +func AnythingOfType(t string) AnythingOfTypeArgument { + return anythingOfTypeArgument(t) +} + +// IsTypeArgument is a struct that contains the type of an argument +// for use when type checking. This is an alternative to AnythingOfType. +// Used in Diff and Assert. +type IsTypeArgument struct { + t reflect.Type +} + +// IsType returns an IsTypeArgument object containing the type to check for. +// You can provide a zero-value of the type to check. This is an +// alternative to AnythingOfType. Used in Diff and Assert. 
+// +// For example: +// Assert(t, IsType(""), IsType(0)) +func IsType(t interface{}) *IsTypeArgument { + return &IsTypeArgument{t: reflect.TypeOf(t)} +} + +// FunctionalOptionsArgument is a struct that contains the type and value of an functional option argument +// for use when type checking. +type FunctionalOptionsArgument struct { + value interface{} +} + +// String returns the string representation of FunctionalOptionsArgument +func (f *FunctionalOptionsArgument) String() string { + var name string + tValue := reflect.ValueOf(f.value) + if tValue.Len() > 0 { + name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() + } + + return strings.Replace(fmt.Sprintf("%#v", f.value), "[]interface {}", name, 1) +} + +// FunctionalOptions returns an FunctionalOptionsArgument object containing the functional option type +// and the values to check of +// +// For example: +// Assert(t, FunctionalOptions("[]foo.FunctionalOption", foo.Opt1(), foo.Opt2())) +func FunctionalOptions(value ...interface{}) *FunctionalOptionsArgument { + return &FunctionalOptionsArgument{ + value: value, + } +} + +// argumentMatcher performs custom argument matching, returning whether or +// not the argument is matched by the expectation fixture function. +type argumentMatcher struct { + // fn is a function which accepts one argument, and returns a bool. + fn reflect.Value +} + +func (f argumentMatcher) Matches(argument interface{}) bool { + expectType := f.fn.Type().In(0) + expectTypeNilSupported := false + switch expectType.Kind() { + case reflect.Interface, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Ptr: + expectTypeNilSupported = true + } + + argType := reflect.TypeOf(argument) + var arg reflect.Value + if argType == nil { + arg = reflect.New(expectType).Elem() + } else { + arg = reflect.ValueOf(argument) + } + + if argType == nil && !expectTypeNilSupported { + panic(errors.New("attempting to call matcher with nil for non-nil expected type")) + } + if argType == nil || argType.AssignableTo(expectType) { + result := f.fn.Call([]reflect.Value{arg}) + return result[0].Bool() + } + return false +} + +func (f argumentMatcher) String() string { + return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).String()) +} + +// MatchedBy can be used to match a mock call based on only certain properties +// from a complex struct or some calculation. It takes a function that will be +// evaluated with the called argument and will return true when there's a match +// and false otherwise. +// +// Example: +// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" })) +// +// |fn|, must be a function accepting a single argument (of the expected type) +// which returns a bool. If |fn| doesn't match the required signature, +// MatchedBy() panics. +func MatchedBy(fn interface{}) argumentMatcher { + fnType := reflect.TypeOf(fn) + + if fnType.Kind() != reflect.Func { + panic(fmt.Sprintf("assert: arguments: %s is not a func", fn)) + } + if fnType.NumIn() != 1 { + panic(fmt.Sprintf("assert: arguments: %s does not take exactly one argument", fn)) + } + if fnType.NumOut() != 1 || fnType.Out(0).Kind() != reflect.Bool { + panic(fmt.Sprintf("assert: arguments: %s does not return a bool", fn)) + } + + return argumentMatcher{fn: reflect.ValueOf(fn)} +} + +// Get Returns the argument at the specified index. 
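+//
+// A common pattern is type-asserting the result inside a mocked method
+// (a sketch; the *User return type is hypothetical):
+//
+//	args := m.Called(id)
+//	return args.Get(0).(*User), args.Error(1)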
+func (args Arguments) Get(index int) interface{} { + if index+1 > len(args) { + panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args))) + } + return args[index] +} + +// Is gets whether the objects match the arguments specified. +func (args Arguments) Is(objects ...interface{}) bool { + for i, obj := range args { + if obj != objects[i] { + return false + } + } + return true +} + +// Diff gets a string describing the differences between the arguments +// and the specified objects. +// +// Returns the diff string and number of differences found. +func (args Arguments) Diff(objects []interface{}) (string, int) { + // TODO: could return string as error and nil for No difference + + output := "\n" + var differences int + + maxArgCount := len(args) + if len(objects) > maxArgCount { + maxArgCount = len(objects) + } + + for i := 0; i < maxArgCount; i++ { + var actual, expected interface{} + var actualFmt, expectedFmt string + + if len(objects) <= i { + actual = "(Missing)" + actualFmt = "(Missing)" + } else { + actual = objects[i] + actualFmt = fmt.Sprintf("(%[1]T=%[1]v)", actual) + } + + if len(args) <= i { + expected = "(Missing)" + expectedFmt = "(Missing)" + } else { + expected = args[i] + expectedFmt = fmt.Sprintf("(%[1]T=%[1]v)", expected) + } + + if matcher, ok := expected.(argumentMatcher); ok { + var matches bool + func() { + defer func() { + if r := recover(); r != nil { + actualFmt = fmt.Sprintf("panic in argument matcher: %v", r) + } + }() + matches = matcher.Matches(actual) + }() + if matches { + output = fmt.Sprintf("%s\t%d: PASS: %s matched by %s\n", output, i, actualFmt, matcher) + } else { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actualFmt, matcher) + } + } else { + switch expected := expected.(type) { + case anythingOfTypeArgument: + // type checking + if reflect.TypeOf(actual).Name() != string(expected) && reflect.TypeOf(actual).String() != string(expected) { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) + } + case *IsTypeArgument: + actualT := reflect.TypeOf(actual) + if actualT != expected.t { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected.t.Name(), actualT.Name(), actualFmt) + } + case *FunctionalOptionsArgument: + t := expected.value + + var name string + tValue := reflect.ValueOf(t) + if tValue.Len() > 0 { + name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() + } + + tName := reflect.TypeOf(t).Name() + if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt) + } else { + if ef, af := assertOpts(t, actual); ef == "" && af == "" { + // match + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName) + } else { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, af, ef) + } + } + + default: + if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { + // match + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt) + } else { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) + } + } + } + + } + + if differences == 
0 { + return "No differences.", differences + } + + return output, differences +} + +// Assert compares the arguments with the specified objects and fails if +// they do not exactly match. +func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + // get the differences + diff, diffCount := args.Diff(objects) + + if diffCount == 0 { + return true + } + + // there are differences... report them... + t.Logf(diff) + t.Errorf("%sArguments do not match.", assert.CallerInfo()) + + return false +} + +// String gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +// +// If no index is provided, String() returns a complete string representation +// of the arguments. +func (args Arguments) String(indexOrNil ...int) string { + if len(indexOrNil) == 0 { + // normal String() method - return a string representation of the args + var argsStr []string + for _, arg := range args { + argsStr = append(argsStr, fmt.Sprintf("%T", arg)) // handles nil nicely + } + return strings.Join(argsStr, ",") + } else if len(indexOrNil) == 1 { + // Index has been specified - get the argument at that index + index := indexOrNil[0] + var s string + var ok bool + if s, ok = args.Get(index).(string); !ok { + panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index))) + } + return s + } + + panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil))) +} + +// Int gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Int(index int) int { + var s int + var ok bool + if s, ok = args.Get(index).(int); !ok { + panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Error gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Error(index int) error { + obj := args.Get(index) + var s error + var ok bool + if obj == nil { + return nil + } + if s, ok = obj.(error); !ok { + panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Bool gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Bool(index int) bool { + var s bool + var ok bool + if s, ok = args.Get(index).(bool); !ok { + panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +func diffArguments(expected Arguments, actual Arguments) string { + if len(expected) != len(actual) { + return fmt.Sprintf("Provided %v arguments, mocked for %v arguments", len(expected), len(actual)) + } + + for x := range expected { + if diffString := diff(expected[x], actual[x]); diffString != "" { + return fmt.Sprintf("Difference found in argument %v:\n\n%s", x, diffString) + } + } + + return "" +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice or array. Otherwise it returns an empty string. 
+func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { + return "" + } + + e := spewConfig.Sdump(expected) + a := spewConfig.Sdump(actual) + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return diff +} + +var spewConfig = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, +} + +type tHelper interface { + Helper() +} + +func assertOpts(expected, actual interface{}) (expectedFmt, actualFmt string) { + expectedOpts := reflect.ValueOf(expected) + actualOpts := reflect.ValueOf(actual) + var expectedNames []string + for i := 0; i < expectedOpts.Len(); i++ { + expectedNames = append(expectedNames, funcName(expectedOpts.Index(i).Interface())) + } + var actualNames []string + for i := 0; i < actualOpts.Len(); i++ { + actualNames = append(actualNames, funcName(actualOpts.Index(i).Interface())) + } + if !assert.ObjectsAreEqual(expectedNames, actualNames) { + expectedFmt = fmt.Sprintf("%v", expectedNames) + actualFmt = fmt.Sprintf("%v", actualNames) + return + } + + for i := 0; i < expectedOpts.Len(); i++ { + expectedOpt := expectedOpts.Index(i).Interface() + actualOpt := actualOpts.Index(i).Interface() + + expectedFunc := expectedNames[i] + actualFunc := actualNames[i] + if expectedFunc != actualFunc { + expectedFmt = expectedFunc + actualFmt = actualFunc + return + } + + ot := reflect.TypeOf(expectedOpt) + var expectedValues []reflect.Value + var actualValues []reflect.Value + if ot.NumIn() == 0 { + return + } + + for i := 0; i < ot.NumIn(); i++ { + vt := ot.In(i).Elem() + expectedValues = append(expectedValues, reflect.New(vt)) + actualValues = append(actualValues, reflect.New(vt)) + } + + reflect.ValueOf(expectedOpt).Call(expectedValues) + reflect.ValueOf(actualOpt).Call(actualValues) + + for i := 0; i < ot.NumIn(); i++ { + if !assert.ObjectsAreEqual(expectedValues[i].Interface(), actualValues[i].Interface()) { + expectedFmt = fmt.Sprintf("%s %+v", expectedNames[i], expectedValues[i].Interface()) + actualFmt = fmt.Sprintf("%s %+v", expectedNames[i], actualValues[i].Interface()) + return + } + } + } + + return "", "" +} + +func funcName(opt interface{}) string { + n := runtime.FuncForPC(reflect.ValueOf(opt).Pointer()).Name() + return strings.TrimSuffix(path.Base(n), path.Ext(n)) +} diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go new file mode 100644 index 000000000..9d80f1952 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/block.go @@ -0,0 +1,159 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blowfish + +// getNextWord returns the next big-endian uint32 value from the byte slice +// at the given position in a circular manner, updating the position. +func getNextWord(b []byte, pos *int) uint32 { + var w uint32 + j := *pos + for i := 0; i < 4; i++ { + w = w<<8 | uint32(b[j]) + j++ + if j >= len(b) { + j = 0 + } + } + *pos = j + return w +} + +// ExpandKey performs a key expansion on the given *Cipher. 
Specifically, it +// performs the Blowfish algorithm's key schedule which sets up the *Cipher's +// pi and substitution tables for calls to Encrypt. This is used, primarily, +// by the bcrypt package to reuse the Blowfish key schedule during its +// set up. It's unlikely that you need to use this directly. +func ExpandKey(key []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + // Using inlined getNextWord for performance. + var d uint32 + for k := 0; k < 4; k++ { + d = d<<8 | uint32(key[j]) + j++ + if j >= len(key) { + j = 0 + } + } + c.p[i] ^= d + } + + var l, r uint32 + for i := 0; i < 18; i += 2 { + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +// This is similar to ExpandKey, but folds the salt during the key +// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero +// salt passed in, reusing ExpandKey turns out to be a place of inefficiency +// and specializing it here is useful. +func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + c.p[i] ^= getNextWord(key, &j) + } + + j = 0 + var l, r uint32 + for i := 0; i < 18; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[0] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] + xr ^= 
((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] + xr ^= c.p[17] + return xr, xl +} + +func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[17] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] + xr ^= c.p[0] + return xr, xl +} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go new file mode 100644 index 000000000..089895680 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -0,0 +1,99 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. +// +// Blowfish is a legacy cipher and its short block size makes it vulnerable to +// birthday bound attacks (see https://sweet32.info). It should only be used +// where compatibility with legacy systems, not security, is the goal. +// +// Deprecated: any new system should use AES (from crypto/aes, if necessary in +// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from +// golang.org/x/crypto/chacha20poly1305). +package blowfish + +// The code is a port of Bruce Schneier's C implementation. +// See https://www.schneier.com/blowfish.html. + +import "strconv" + +// The Blowfish block size in bytes. +const BlockSize = 8 + +// A Cipher is an instance of Blowfish encryption using a particular key. 
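+//
+// A minimal single-block sketch (for real data, wrap the Cipher in a mode
+// from crypto/cipher instead of calling Encrypt directly):
+//
+//	c, err := blowfish.NewCipher(key) // key: 1 to 56 bytes
+//	if err != nil {
+//		// handle the KeySizeError
+//	}
+//	var src, dst [blowfish.BlockSize]byte
+//	copy(src[:], plaintext) // exactly one 8-byte block
+//	c.Encrypt(dst[:], src[:])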
+type Cipher struct {
+	p              [18]uint32
+	s0, s1, s2, s3 [256]uint32
+}
+
+type KeySizeError int
+
+func (k KeySizeError) Error() string {
+	return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
+}
+
+// NewCipher creates and returns a Cipher.
+// The key argument should be the Blowfish key, from 1 to 56 bytes.
+func NewCipher(key []byte) (*Cipher, error) {
+	var result Cipher
+	if k := len(key); k < 1 || k > 56 {
+		return nil, KeySizeError(k)
+	}
+	initCipher(&result)
+	ExpandKey(key, &result)
+	return &result, nil
+}
+
+// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
+// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
+// sufficient and desirable. For bcrypt compatibility, the key can be over 56
+// bytes.
+func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
+	if len(salt) == 0 {
+		return NewCipher(key)
+	}
+	var result Cipher
+	if k := len(key); k < 1 {
+		return nil, KeySizeError(k)
+	}
+	initCipher(&result)
+	expandKeyWithSalt(key, salt, &result)
+	return &result, nil
+}
+
+// BlockSize returns the Blowfish block size, 8 bytes.
+// It is necessary to satisfy the Block interface in the
+// package "crypto/cipher".
+func (c *Cipher) BlockSize() int { return BlockSize }
+
+// Encrypt encrypts the 8-byte buffer src using the cipher's key
+// and stores the result in dst.
+// Note that for amounts of data larger than a block,
+// it is not safe to just call Encrypt on successive blocks;
+// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
+func (c *Cipher) Encrypt(dst, src []byte) {
+	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+	l, r = encryptBlock(l, r, c)
+	dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
+	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
+}
+
+// Decrypt decrypts the 8-byte buffer src using the cipher's key
+// and stores the result in dst.
+func (c *Cipher) Decrypt(dst, src []byte) {
+	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+	l, r = decryptBlock(l, r, c)
+	dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
+	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
+}
+
+func initCipher(c *Cipher) {
+	copy(c.p[0:], p[0:])
+	copy(c.s0[0:], s0[0:])
+	copy(c.s1[0:], s1[0:])
+	copy(c.s2[0:], s2[0:])
+	copy(c.s3[0:], s3[0:])
+}
diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go
new file mode 100644
index 000000000..d04077595
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blowfish/const.go
@@ -0,0 +1,199 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The startup permutation array and substitution boxes.
+// They are the hexadecimal digits of PI; see:
+// https://www.schneier.com/code/constants.txt.
+ +package blowfish + +var s0 = [256]uint32{ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, + 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, + 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, + 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, + 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, + 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, + 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, + 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, + 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, + 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, + 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, + 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, + 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, + 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, + 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, + 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, + 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, + 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, + 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, + 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, + 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, + 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, +} + +var s1 = [256]uint32{ + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, + 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, + 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 
0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, + 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, + 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, + 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, + 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, + 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, + 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, + 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, + 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, + 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, + 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, + 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, + 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, + 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, + 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, + 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, + 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, + 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, + 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, + 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, +} + +var s2 = [256]uint32{ + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, + 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, + 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, + 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, + 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, + 0x6841e7f7, 
0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, + 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, + 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, + 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, + 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, + 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, + 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, + 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, + 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, + 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, + 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, + 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, + 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, + 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, + 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, + 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, + 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, +} + +var s3 = [256]uint32{ + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, + 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, + 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, + 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, + 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, + 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, + 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, + 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, + 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, + 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, + 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, + 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, + 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, + 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, + 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, + 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, + 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, + 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, + 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, + 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, + 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, + 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, +} + +var p = [18]uint32{ + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, + 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, +} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go new file mode 100644 index 000000000..21ca3b2ee --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -0,0 +1,90 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package curve25519 provides an implementation of the X25519 function, which +// performs scalar multiplication on the elliptic curve known as Curve25519. +// See RFC 7748. +// +// This package is a wrapper for the X25519 implementation +// in the crypto/ecdh package. +package curve25519 + +import "crypto/ecdh" + +// ScalarMult sets dst to the product scalar * point. +// +// Deprecated: when provided a low-order point, ScalarMult will set dst to all +// zeroes, irrespective of the scalar. Instead, use the X25519 function, which +// will return an error. 
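+//
+// For example, a sketch of the replacement call:
+//
+//	out, err := curve25519.X25519(scalar[:], point[:])
+//	if err != nil {
+//		// the point was low-order; ScalarMult would have zeroed dst
+//	}
+//	_ = out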
+func ScalarMult(dst, scalar, point *[32]byte) { + if _, err := x25519(dst, scalar[:], point[:]); err != nil { + // The only error condition for x25519 when the inputs are 32 bytes long + // is if the output would have been the all-zero value. + for i := range dst { + dst[i] = 0 + } + } +} + +// ScalarBaseMult sets dst to the product scalar * base where base is the +// standard generator. +// +// It is recommended to use the X25519 function with Basepoint instead, as +// copying into fixed size arrays can lead to unexpected bugs. +func ScalarBaseMult(dst, scalar *[32]byte) { + curve := ecdh.X25519() + priv, err := curve.NewPrivateKey(scalar[:]) + if err != nil { + panic("curve25519: internal error: scalarBaseMult was not 32 bytes") + } + copy(dst[:], priv.PublicKey().Bytes()) +} + +const ( + // ScalarSize is the size of the scalar input to X25519. + ScalarSize = 32 + // PointSize is the size of the point input to X25519. + PointSize = 32 +) + +// Basepoint is the canonical Curve25519 generator. +var Basepoint []byte + +var basePoint = [32]byte{9} + +func init() { Basepoint = basePoint[:] } + +// X25519 returns the result of the scalar multiplication (scalar * point), +// according to RFC 7748, Section 5. scalar, point and the return value are +// slices of 32 bytes. +// +// scalar can be generated at random, for example with crypto/rand. point should +// be either Basepoint or the output of another X25519 call. +// +// If point is Basepoint (but not if it's a different slice with the same +// contents) a precomputed implementation might be used for performance. +func X25519(scalar, point []byte) ([]byte, error) { + // Outline the body of function, to let the allocation be inlined in the + // caller, and possibly avoid escaping to the heap. + var dst [32]byte + return x25519(&dst, scalar, point) +} + +func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { + curve := ecdh.X25519() + pub, err := curve.NewPublicKey(point) + if err != nil { + return nil, err + } + priv, err := curve.NewPrivateKey(scalar) + if err != nil { + return nil, err + } + out, err := priv.ECDH(pub) + if err != nil { + return nil, err + } + copy(dst[:], out) + return dst[:], nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/bmp-string.go b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go new file mode 100644 index 000000000..233b8b62c --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "errors" + "unicode/utf16" +) + +// bmpString returns s encoded in UCS-2 with a zero terminator. +func bmpString(s string) ([]byte, error) { + // References: + // https://tools.ietf.org/html/rfc7292#appendix-B.1 + // https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane + // - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes + // EncodeRune returns 0xfffd if the rune does not need special encoding + // - the above RFC provides the info that BMPStrings are NULL terminated. 
+
+	ret := make([]byte, 0, 2*len(s)+2)
+
+	for _, r := range s {
+		if t, _ := utf16.EncodeRune(r); t != 0xfffd {
+			return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2")
+		}
+		ret = append(ret, byte(r/256), byte(r%256))
+	}
+
+	return append(ret, 0, 0), nil
+}
+
+func decodeBMPString(bmpString []byte) (string, error) {
+	if len(bmpString)%2 != 0 {
+		return "", errors.New("pkcs12: odd-length BMP string")
+	}
+
+	// strip terminator if present
+	if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 {
+		bmpString = bmpString[:l-2]
+	}
+
+	s := make([]uint16, 0, len(bmpString)/2)
+	for len(bmpString) > 0 {
+		s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1]))
+		bmpString = bmpString[2:]
+	}
+
+	return string(utf16.Decode(s)), nil
+}
diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto.go b/vendor/golang.org/x/crypto/pkcs12/crypto.go
new file mode 100644
index 000000000..96f4a1a56
--- /dev/null
+++ b/vendor/golang.org/x/crypto/pkcs12/crypto.go
@@ -0,0 +1,131 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+	"bytes"
+	"crypto/cipher"
+	"crypto/des"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+
+	"golang.org/x/crypto/pkcs12/internal/rc2"
+)
+
+var (
+	oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3})
+	oidPBEWithSHAAnd40BitRC2CBC      = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6})
+)
+
+// pbeCipher is an abstraction of a PKCS#12 cipher.
+type pbeCipher interface {
+	// create returns a cipher.Block given a key.
+	create(key []byte) (cipher.Block, error)
+	// deriveKey returns a key derived from the given password and salt.
+	deriveKey(salt, password []byte, iterations int) []byte
+	// deriveIV returns an IV derived from the given password and salt.
+ deriveIV(salt, password []byte, iterations int) []byte +} + +type shaWithTripleDESCBC struct{} + +func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) { + return des.NewTripleDESCipher(key) +} + +func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24) +} + +func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type shaWith40BitRC2CBC struct{} + +func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) { + return rc2.New(key, len(key)*8) +} + +func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5) +} + +func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type pbeParams struct { + Salt []byte + Iterations int +} + +func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) { + var cipherType pbeCipher + + switch { + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC): + cipherType = shaWithTripleDESCBC{} + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC): + cipherType = shaWith40BitRC2CBC{} + default: + return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported") + } + + var params pbeParams + if err := unmarshal(algorithm.Parameters.FullBytes, ¶ms); err != nil { + return nil, 0, err + } + + key := cipherType.deriveKey(params.Salt, password, params.Iterations) + iv := cipherType.deriveIV(params.Salt, password, params.Iterations) + + block, err := cipherType.create(key) + if err != nil { + return nil, 0, err + } + + return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil +} + +func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) { + cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password) + if err != nil { + return nil, err + } + + encrypted := info.Data() + if len(encrypted) == 0 { + return nil, errors.New("pkcs12: empty encrypted data") + } + if len(encrypted)%blockSize != 0 { + return nil, errors.New("pkcs12: input is not a multiple of the block size") + } + decrypted = make([]byte, len(encrypted)) + cbc.CryptBlocks(decrypted, encrypted) + + psLen := int(decrypted[len(decrypted)-1]) + if psLen == 0 || psLen > blockSize { + return nil, ErrDecryption + } + + if len(decrypted) < psLen { + return nil, ErrDecryption + } + ps := decrypted[len(decrypted)-psLen:] + decrypted = decrypted[:len(decrypted)-psLen] + if !bytes.Equal(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) { + return nil, ErrDecryption + } + + return +} + +// decryptable abstracts an object that contains ciphertext. +type decryptable interface { + Algorithm() pkix.AlgorithmIdentifier + Data() []byte +} diff --git a/vendor/golang.org/x/crypto/pkcs12/errors.go b/vendor/golang.org/x/crypto/pkcs12/errors.go new file mode 100644 index 000000000..7377ce6fb --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/errors.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import "errors" + +var ( + // ErrDecryption represents a failure to decrypt the input. 
+ ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding") + + // ErrIncorrectPassword is returned when an incorrect password is detected. + // Usually, P12/PFX data is signed to be able to verify the password. + ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect") +) + +// NotImplementedError indicates that the input is not currently supported. +type NotImplementedError string + +func (e NotImplementedError) Error() string { + return "pkcs12: " + string(e) +} diff --git a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go new file mode 100644 index 000000000..05de9cc2c --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go @@ -0,0 +1,268 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rc2 implements the RC2 cipher +/* +https://www.ietf.org/rfc/rfc2268.txt +http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf + +This code is licensed under the MIT license. +*/ +package rc2 + +import ( + "crypto/cipher" + "encoding/binary" + "math/bits" +) + +// The rc2 block size in bytes +const BlockSize = 8 + +type rc2Cipher struct { + k [64]uint16 +} + +// New returns a new rc2 cipher with the given key and effective key length t1 +func New(key []byte, t1 int) (cipher.Block, error) { + // TODO(dgryski): error checking for key length + return &rc2Cipher{ + k: expandKey(key, t1), + }, nil +} + +func (*rc2Cipher) BlockSize() int { return BlockSize } + +var piTable = [256]byte{ + 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d, + 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2, + 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32, + 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82, + 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc, + 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26, + 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03, + 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7, + 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a, + 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec, + 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39, + 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31, + 0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9, + 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9, + 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e, + 0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad, +} + +func expandKey(key []byte, t1 int) [64]uint16 { + + l := make([]byte, 128) + copy(l, key) + + var t = len(key) + var t8 = (t1 + 7) / 8 + var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8)))) + + for i := len(key); i < 128; i++ { + l[i] = piTable[l[i-1]+l[uint8(i-t)]] + } + + l[128-t8] = piTable[l[128-t8]&tm] + + for i := 127 - t8; i >= 0; i-- { + 
l[i] = piTable[l[i+1]^l[i+t8]] + } + + var k [64]uint16 + + for i := range k { + k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256 + } + + return k +} + +func (c *rc2Cipher) Encrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + var j int + + for j <= 16 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = bits.RotateLeft16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = bits.RotateLeft16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = bits.RotateLeft16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = bits.RotateLeft16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 40 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = bits.RotateLeft16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = bits.RotateLeft16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = bits.RotateLeft16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = bits.RotateLeft16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 60 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = bits.RotateLeft16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = bits.RotateLeft16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = bits.RotateLeft16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = bits.RotateLeft16(r3, 5) + j++ + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} + +func (c *rc2Cipher) Decrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + j := 63 + + for j >= 44 { + // unmix r3 + r3 = bits.RotateLeft16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = bits.RotateLeft16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = bits.RotateLeft16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = bits.RotateLeft16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 20 { + // unmix r3 + r3 = bits.RotateLeft16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = bits.RotateLeft16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = bits.RotateLeft16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = bits.RotateLeft16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 0 { + // unmix r3 + r3 = bits.RotateLeft16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = bits.RotateLeft16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = 
bits.RotateLeft16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = bits.RotateLeft16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} diff --git a/vendor/golang.org/x/crypto/pkcs12/mac.go b/vendor/golang.org/x/crypto/pkcs12/mac.go new file mode 100644 index 000000000..5f38aa7de --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/mac.go @@ -0,0 +1,45 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/x509/pkix" + "encoding/asn1" +) + +type macData struct { + Mac digestInfo + MacSalt []byte + Iterations int `asn1:"optional,default:1"` +} + +// from PKCS#7: +type digestInfo struct { + Algorithm pkix.AlgorithmIdentifier + Digest []byte +} + +var ( + oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}) +) + +func verifyMac(macData *macData, message, password []byte) error { + if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) { + return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String()) + } + + key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20) + + mac := hmac.New(sha1.New, key) + mac.Write(message) + expectedMAC := mac.Sum(nil) + + if !hmac.Equal(macData.Mac.Digest, expectedMAC) { + return ErrIncorrectPassword + } + return nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/pbkdf.go b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go new file mode 100644 index 000000000..5c419d41e --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/sha1" + "math/big" +) + +var ( + one = big.NewInt(1) +) + +// sha1Sum returns the SHA-1 hash of in. +func sha1Sum(in []byte) []byte { + sum := sha1.Sum(in) + return sum[:] +} + +// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of +// repeats of pattern. +func fillWithRepeats(pattern []byte, v int) []byte { + if len(pattern) == 0 { + return nil + } + outputLen := v * ((len(pattern) + v - 1) / v) + return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen] +} + +func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) { + // implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments + + // Let H be a hash function built around a compression function f: + + // Z_2^u x Z_2^v -> Z_2^u + + // (that is, H has a chaining variable and output of length u bits, and + // the message input to the compression function of H is v bits). The + // values for u and v are as follows: + + // HASH FUNCTION VALUE u VALUE v + // MD2, MD5 128 512 + // SHA-1 160 512 + // SHA-224 224 512 + // SHA-256 256 512 + // SHA-384 384 1024 + // SHA-512 512 1024 + // SHA-512/224 224 1024 + // SHA-512/256 256 1024 + + // Furthermore, let r be the iteration count. 
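+
+	// Editor's note (not RFC text): this implementation takes u and v in
+	// bytes rather than bits. For the SHA-1 instantiation used by this
+	// package (see verifyMac in mac.go), that is u=20 and v=64; salt and
+	// iterations below are placeholders:
+	//
+	//	key := pbkdf(sha1Sum, 20, 64, salt, password, iterations, 3, 20)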
+ + // We assume here that u and v are both multiples of 8, as are the + // lengths of the password and salt strings (which we denote by p and s, + // respectively) and the number n of pseudorandom bits required. In + // addition, u and v are of course non-zero. + + // For information on security considerations for MD5 [19], see [25] and + // [1], and on those for MD2, see [18]. + + // The following procedure can be used to produce pseudorandom bits for + // a particular "purpose" that is identified by a byte called "ID". + // This standard specifies 3 different values for the ID byte: + + // 1. If ID=1, then the pseudorandom bits being produced are to be used + // as key material for performing encryption or decryption. + + // 2. If ID=2, then the pseudorandom bits being produced are to be used + // as an IV (Initial Value) for encryption or decryption. + + // 3. If ID=3, then the pseudorandom bits being produced are to be used + // as an integrity key for MACing. + + // 1. Construct a string, D (the "diversifier"), by concatenating v/8 + // copies of ID. + var D []byte + for i := 0; i < v; i++ { + D = append(D, ID) + } + + // 2. Concatenate copies of the salt together to create a string S of + // length v(ceiling(s/v)) bits (the final copy of the salt may be + // truncated to create S). Note that if the salt is the empty + // string, then so is S. + + S := fillWithRepeats(salt, v) + + // 3. Concatenate copies of the password together to create a string P + // of length v(ceiling(p/v)) bits (the final copy of the password + // may be truncated to create P). Note that if the password is the + // empty string, then so is P. + + P := fillWithRepeats(password, v) + + // 4. Set I=S||P to be the concatenation of S and P. + I := append(S, P...) + + // 5. Set c=ceiling(n/u). + c := (size + u - 1) / u + + // 6. For i=1, 2, ..., c, do the following: + A := make([]byte, c*20) + var IjBuf []byte + for i := 0; i < c; i++ { + // A. Set A2=H^r(D||I). (i.e., the r-th hash of D||1, + // H(H(H(... H(D||I)))) + Ai := hash(append(D, I...)) + for j := 1; j < r; j++ { + Ai = hash(Ai) + } + copy(A[i*20:], Ai[:]) + + if i < c-1 { // skip on last iteration + // B. Concatenate copies of Ai to create a string B of length v + // bits (the final copy of Ai may be truncated to create B). + var B []byte + for len(B) < v { + B = append(B, Ai[:]...) + } + B = B[:v] + + // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit + // blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by + // setting I_j=(I_j+B+1) mod 2^v for each j. + { + Bbi := new(big.Int).SetBytes(B) + Ij := new(big.Int) + + for j := 0; j < len(I)/v; j++ { + Ij.SetBytes(I[j*v : (j+1)*v]) + Ij.Add(Ij, Bbi) + Ij.Add(Ij, one) + Ijb := Ij.Bytes() + // We expect Ijb to be exactly v bytes, + // if it is longer or shorter we must + // adjust it accordingly. + if len(Ijb) > v { + Ijb = Ijb[len(Ijb)-v:] + } + if len(Ijb) < v { + if IjBuf == nil { + IjBuf = make([]byte, v) + } + bytesShort := v - len(Ijb) + for i := 0; i < bytesShort; i++ { + IjBuf[i] = 0 + } + copy(IjBuf[bytesShort:], Ijb) + Ijb = IjBuf + } + copy(I[j*v:(j+1)*v], Ijb) + } + } + } + } + // 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom + // bit string, A. + + // 8. Use the first n bits of A as the output of this entire process. + return A[:size] + + // If the above process is being used to generate a DES key, the process + // should be used to create 64 random bits, and the key's parity bits + // should be set after the 64 bits have been produced. 
Similar concerns + // hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any + // similar keys with parity bits "built into them". +} diff --git a/vendor/golang.org/x/crypto/pkcs12/pkcs12.go b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go new file mode 100644 index 000000000..3a89bdb3e --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go @@ -0,0 +1,360 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkcs12 implements some of PKCS#12. +// +// This implementation is distilled from https://tools.ietf.org/html/rfc7292 +// and referenced documents. It is intended for decoding P12/PFX-stored +// certificates and keys for use with the crypto/tls package. +// +// This package is frozen. If it's missing functionality you need, consider +// an alternative like software.sslmate.com/src/go-pkcs12. +package pkcs12 + +import ( + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "errors" +) + +var ( + oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1}) + oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6}) + + oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20}) + oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21}) + oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1}) + + errUnknownAttributeOID = errors.New("pkcs12: unknown attribute OID") +) + +type pfxPdu struct { + Version int + AuthSafe contentInfo + MacData macData `asn1:"optional"` +} + +type contentInfo struct { + ContentType asn1.ObjectIdentifier + Content asn1.RawValue `asn1:"tag:0,explicit,optional"` +} + +type encryptedData struct { + Version int + EncryptedContentInfo encryptedContentInfo +} + +type encryptedContentInfo struct { + ContentType asn1.ObjectIdentifier + ContentEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedContent []byte `asn1:"tag:0,optional"` +} + +func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.ContentEncryptionAlgorithm +} + +func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent } + +type safeBag struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"tag:0,explicit"` + Attributes []pkcs12Attribute `asn1:"set,optional"` +} + +type pkcs12Attribute struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"set"` +} + +type encryptedPrivateKeyInfo struct { + AlgorithmIdentifier pkix.AlgorithmIdentifier + EncryptedData []byte +} + +func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.AlgorithmIdentifier +} + +func (i encryptedPrivateKeyInfo) Data() []byte { + return i.EncryptedData +} + +// PEM block types +const ( + certificateType = "CERTIFICATE" + privateKeyType = "PRIVATE KEY" +) + +// unmarshal calls asn1.Unmarshal, but also returns an error if there is any +// trailing data after unmarshaling. +func unmarshal(in []byte, out interface{}) error { + trailing, err := asn1.Unmarshal(in, out) + if err != nil { + return err + } + if len(trailing) != 0 { + return errors.New("pkcs12: trailing data found") + } + return nil +} + +// ToPEM converts all "safe bags" contained in pfxData to PEM blocks. +// Unknown attributes are discarded. 
+// +// Note that although the returned PEM blocks for private keys have type +// "PRIVATE KEY", the bytes are not encoded according to PKCS #8, but according +// to PKCS #1 for RSA keys and SEC 1 for ECDSA keys. +func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, ErrIncorrectPassword + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + + if err != nil { + return nil, err + } + + blocks := make([]*pem.Block, 0, len(bags)) + for _, bag := range bags { + block, err := convertBag(&bag, encodedPassword) + if err != nil { + return nil, err + } + blocks = append(blocks, block) + } + + return blocks, nil +} + +func convertBag(bag *safeBag, password []byte) (*pem.Block, error) { + block := &pem.Block{ + Headers: make(map[string]string), + } + + for _, attribute := range bag.Attributes { + k, v, err := convertAttribute(&attribute) + if err == errUnknownAttributeOID { + continue + } + if err != nil { + return nil, err + } + block.Headers[k] = v + } + + switch { + case bag.Id.Equal(oidCertBag): + block.Type = certificateType + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, err + } + block.Bytes = certsData + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + block.Type = privateKeyType + + key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password) + if err != nil { + return nil, err + } + + switch key := key.(type) { + case *rsa.PrivateKey: + block.Bytes = x509.MarshalPKCS1PrivateKey(key) + case *ecdsa.PrivateKey: + block.Bytes, err = x509.MarshalECPrivateKey(key) + if err != nil { + return nil, err + } + default: + return nil, errors.New("found unknown private key type in PKCS#8 wrapping") + } + default: + return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String()) + } + return block, nil +} + +func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) { + isString := false + + switch { + case attribute.Id.Equal(oidFriendlyName): + key = "friendlyName" + isString = true + case attribute.Id.Equal(oidLocalKeyID): + key = "localKeyId" + case attribute.Id.Equal(oidMicrosoftCSPName): + // This key is chosen to match OpenSSL. + key = "Microsoft CSP Name" + isString = true + default: + return "", "", errUnknownAttributeOID + } + + if isString { + if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil { + return "", "", err + } + if value, err = decodeBMPString(attribute.Value.Bytes); err != nil { + return "", "", err + } + } else { + var id []byte + if err := unmarshal(attribute.Value.Bytes, &id); err != nil { + return "", "", err + } + value = hex.EncodeToString(id) + } + + return key, value, nil +} + +// Decode extracts a certificate and private key from pfxData. This function +// assumes that there is only one certificate and only one private key in the +// pfxData; if there are more use ToPEM instead. 
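+//
+// An illustrative sketch (editor's addition, not part of the upstream file;
+// pfxData and the password are assumed to come from the caller), pairing
+// Decode with crypto/tls:
+//
+//	priv, cert, err := pkcs12.Decode(pfxData, "password")
+//	if err != nil {
+//		return err
+//	}
+//	tlsCert := tls.Certificate{
+//		Certificate: [][]byte{cert.Raw},
+//		PrivateKey:  priv,
+//	}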
+func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, nil, err + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + if err != nil { + return nil, nil, err + } + + if len(bags) != 2 { + err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU") + return + } + + for _, bag := range bags { + switch { + case bag.Id.Equal(oidCertBag): + if certificate != nil { + err = errors.New("pkcs12: expected exactly one certificate bag") + } + + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, nil, err + } + certs, err := x509.ParseCertificates(certsData) + if err != nil { + return nil, nil, err + } + if len(certs) != 1 { + err = errors.New("pkcs12: expected exactly one certificate in the certBag") + return nil, nil, err + } + certificate = certs[0] + + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + if privateKey != nil { + err = errors.New("pkcs12: expected exactly one key bag") + return nil, nil, err + } + + if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil { + return nil, nil, err + } + } + } + + if certificate == nil { + return nil, nil, errors.New("pkcs12: certificate missing") + } + if privateKey == nil { + return nil, nil, errors.New("pkcs12: private key missing") + } + + return +} + +func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) { + pfx := new(pfxPdu) + if err := unmarshal(p12Data, pfx); err != nil { + return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error()) + } + + if pfx.Version != 3 { + return nil, nil, NotImplementedError("can only decode v3 PFX PDU's") + } + + if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) { + return nil, nil, NotImplementedError("only password-protected PFX is implemented") + } + + // unmarshal the explicit bytes in the content for type 'data' + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil { + return nil, nil, err + } + + if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 { + return nil, nil, errors.New("pkcs12: no MAC in data") + } + + if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil { + if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 { + // some implementations use an empty byte array + // for the empty string password try one more + // time with empty-empty password + password = nil + err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password) + } + if err != nil { + return nil, nil, err + } + } + + var authenticatedSafe []contentInfo + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil { + return nil, nil, err + } + + if len(authenticatedSafe) != 2 { + return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe") + } + + for _, ci := range authenticatedSafe { + var data []byte + + switch { + case ci.ContentType.Equal(oidDataContentType): + if err := unmarshal(ci.Content.Bytes, &data); err != nil { + return nil, nil, err + } + case ci.ContentType.Equal(oidEncryptedDataContentType): + var encryptedData encryptedData + if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil { + return nil, nil, err + } + if encryptedData.Version != 0 { + return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported") + } + if data, err = 
pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil { + return nil, nil, err + } + default: + return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe") + } + + var safeContents []safeBag + if err := unmarshal(data, &safeContents); err != nil { + return nil, nil, err + } + bags = append(bags, safeContents...) + } + + return bags, password, nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/safebags.go b/vendor/golang.org/x/crypto/pkcs12/safebags.go new file mode 100644 index 000000000..def1f7b98 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/safebags.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/x509" + "encoding/asn1" + "errors" +) + +var ( + // see https://tools.ietf.org/html/rfc7292#appendix-D + oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1}) + oidPKCS8ShroundedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2}) + oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3}) +) + +type certBag struct { + Id asn1.ObjectIdentifier + Data []byte `asn1:"tag:0,explicit"` +} + +func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) { + pkinfo := new(encryptedPrivateKeyInfo) + if err = unmarshal(asn1Data, pkinfo); err != nil { + return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error()) + } + + pkData, err := pbDecrypt(pkinfo, password) + if err != nil { + return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error()) + } + + ret := new(asn1.RawValue) + if err = unmarshal(pkData, ret); err != nil { + return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error()) + } + + if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil { + return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error()) + } + + return privateKey, nil +} + +func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) { + bag := new(certBag) + if err := unmarshal(asn1Data, bag); err != nil { + return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error()) + } + if !bag.Id.Equal(oidCertTypeX509Certificate) { + return nil, NotImplementedError("only X509 certificates are supported") + } + return bag.Data, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go new file mode 100644 index 000000000..1ab07d078 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/buffer.go @@ -0,0 +1,97 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "io" + "sync" +) + +// buffer provides a linked list buffer for data exchange +// between producer and consumer. Theoretically the buffer is +// of unlimited capacity as it does no allocation of its own. +type buffer struct { + // protects concurrent access to head, tail and closed + *sync.Cond + + head *element // the buffer that will be read first + tail *element // the buffer that will be read last + + closed bool +} + +// An element represents a single link in a linked list. +type element struct { + buf []byte + next *element +} + +// newBuffer returns an empty buffer that is not closed. 
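+//
+// A minimal usage sketch (editor's addition):
+//
+//	b := newBuffer()
+//	b.write([]byte("data"))
+//	b.eof()
+//	n, _ := b.Read(make([]byte, 4))   // n == 4, reads "data"
+//	_, err := b.Read(make([]byte, 4)) // err == io.EOF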
+func newBuffer() *buffer { + e := new(element) + b := &buffer{ + Cond: newCond(), + head: e, + tail: e, + } + return b +} + +// write makes buf available for Read to receive. +// buf must not be modified after the call to write. +func (b *buffer) write(buf []byte) { + b.Cond.L.Lock() + e := &element{buf: buf} + b.tail.next = e + b.tail = e + b.Cond.Signal() + b.Cond.L.Unlock() +} + +// eof closes the buffer. Reads from the buffer once all +// the data has been consumed will receive io.EOF. +func (b *buffer) eof() { + b.Cond.L.Lock() + b.closed = true + b.Cond.Signal() + b.Cond.L.Unlock() +} + +// Read reads data from the internal buffer in buf. Reads will block +// if no data is available, or until the buffer is closed. +func (b *buffer) Read(buf []byte) (n int, err error) { + b.Cond.L.Lock() + defer b.Cond.L.Unlock() + + for len(buf) > 0 { + // if there is data in b.head, copy it + if len(b.head.buf) > 0 { + r := copy(buf, b.head.buf) + buf, b.head.buf = buf[r:], b.head.buf[r:] + n += r + continue + } + // if there is a next buffer, make it the head + if len(b.head.buf) == 0 && b.head != b.tail { + b.head = b.head.next + continue + } + + // if at least one byte has been copied, return + if n > 0 { + break + } + + // if nothing was read, and there is nothing outstanding + // check to see if the buffer is closed. + if b.closed { + err = io.EOF + break + } + // out of buffers, wait for producer + b.Cond.Wait() + } + return +} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go new file mode 100644 index 000000000..27d0e14aa --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -0,0 +1,611 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "sort" + "time" +) + +// Certificate algorithm names from [PROTOCOL.certkeys]. These values can appear +// in Certificate.Type, PublicKey.Type, and ClientConfig.HostKeyAlgorithms. +// Unlike key algorithm names, these are not passed to AlgorithmSigner nor +// returned by MultiAlgorithmSigner and don't appear in the Signature.Format +// field. +const ( + CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" + CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" + CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" + CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" + CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" + CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com" + CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" + CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com" + + // CertAlgoRSASHA256v01 and CertAlgoRSASHA512v01 can't appear as a + // Certificate.Type (or PublicKey.Type), but only in + // ClientConfig.HostKeyAlgorithms. + CertAlgoRSASHA256v01 = "rsa-sha2-256-cert-v01@openssh.com" + CertAlgoRSASHA512v01 = "rsa-sha2-512-cert-v01@openssh.com" +) + +const ( + // Deprecated: use CertAlgoRSAv01. + CertSigAlgoRSAv01 = CertAlgoRSAv01 + // Deprecated: use CertAlgoRSASHA256v01. + CertSigAlgoRSASHA2256v01 = CertAlgoRSASHA256v01 + // Deprecated: use CertAlgoRSASHA512v01. + CertSigAlgoRSASHA2512v01 = CertAlgoRSASHA512v01 +) + +// Certificate types distinguish between host and user +// certificates. The values can be set in the CertType field of +// Certificate. +const ( + UserCert = 1 + HostCert = 2 +) + +// Signature represents a cryptographic signature. 
+type Signature struct { + Format string + Blob []byte + Rest []byte `ssh:"rest"` +} + +// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that +// a certificate does not expire. +const CertTimeInfinity = 1<<64 - 1 + +// An Certificate represents an OpenSSH certificate as defined in +// [PROTOCOL.certkeys]?rev=1.8. The Certificate type implements the +// PublicKey interface, so it can be unmarshaled using +// ParsePublicKey. +type Certificate struct { + Nonce []byte + Key PublicKey + Serial uint64 + CertType uint32 + KeyId string + ValidPrincipals []string + ValidAfter uint64 + ValidBefore uint64 + Permissions + Reserved []byte + SignatureKey PublicKey + Signature *Signature +} + +// genericCertData holds the key-independent part of the certificate data. +// Overall, certificates contain an nonce, public key fields and +// key-independent fields. +type genericCertData struct { + Serial uint64 + CertType uint32 + KeyId string + ValidPrincipals []byte + ValidAfter uint64 + ValidBefore uint64 + CriticalOptions []byte + Extensions []byte + Reserved []byte + SignatureKey []byte + Signature []byte +} + +func marshalStringList(namelist []string) []byte { + var to []byte + for _, name := range namelist { + s := struct{ N string }{name} + to = append(to, Marshal(&s)...) + } + return to +} + +type optionsTuple struct { + Key string + Value []byte +} + +type optionsTupleValue struct { + Value string +} + +// serialize a map of critical options or extensions +// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, +// we need two length prefixes for a non-empty string value +func marshalTuples(tups map[string]string) []byte { + keys := make([]string, 0, len(tups)) + for key := range tups { + keys = append(keys, key) + } + sort.Strings(keys) + + var ret []byte + for _, key := range keys { + s := optionsTuple{Key: key} + if value := tups[key]; len(value) > 0 { + s.Value = Marshal(&optionsTupleValue{value}) + } + ret = append(ret, Marshal(&s)...) + } + return ret +} + +// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, +// we need two length prefixes for a non-empty option value +func parseTuples(in []byte) (map[string]string, error) { + tups := map[string]string{} + var lastKey string + var haveLastKey bool + + for len(in) > 0 { + var key, val, extra []byte + var ok bool + + if key, in, ok = parseString(in); !ok { + return nil, errShortRead + } + keyStr := string(key) + // according to [PROTOCOL.certkeys], the names must be in + // lexical order. 
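+	// For example (editor's note): a certificate carrying the critical
+	// options "force-command" and "source-address" must encode
+	// "force-command" first, since "force-command" < "source-address"
+	// in the byte-wise ordering checked below.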
+ if haveLastKey && keyStr <= lastKey { + return nil, fmt.Errorf("ssh: certificate options are not in lexical order") + } + lastKey, haveLastKey = keyStr, true + // the next field is a data field, which if non-empty has a string embedded + if val, in, ok = parseString(in); !ok { + return nil, errShortRead + } + if len(val) > 0 { + val, extra, ok = parseString(val) + if !ok { + return nil, errShortRead + } + if len(extra) > 0 { + return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") + } + tups[keyStr] = string(val) + } else { + tups[keyStr] = "" + } + } + return tups, nil +} + +func parseCert(in []byte, privAlgo string) (*Certificate, error) { + nonce, rest, ok := parseString(in) + if !ok { + return nil, errShortRead + } + + key, rest, err := parsePubKey(rest, privAlgo) + if err != nil { + return nil, err + } + + var g genericCertData + if err := Unmarshal(rest, &g); err != nil { + return nil, err + } + + c := &Certificate{ + Nonce: nonce, + Key: key, + Serial: g.Serial, + CertType: g.CertType, + KeyId: g.KeyId, + ValidAfter: g.ValidAfter, + ValidBefore: g.ValidBefore, + } + + for principals := g.ValidPrincipals; len(principals) > 0; { + principal, rest, ok := parseString(principals) + if !ok { + return nil, errShortRead + } + c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) + principals = rest + } + + c.CriticalOptions, err = parseTuples(g.CriticalOptions) + if err != nil { + return nil, err + } + c.Extensions, err = parseTuples(g.Extensions) + if err != nil { + return nil, err + } + c.Reserved = g.Reserved + k, err := ParsePublicKey(g.SignatureKey) + if err != nil { + return nil, err + } + + c.SignatureKey = k + c.Signature, rest, ok = parseSignatureBody(g.Signature) + if !ok || len(rest) > 0 { + return nil, errors.New("ssh: signature parse error") + } + + return c, nil +} + +type openSSHCertSigner struct { + pub *Certificate + signer Signer +} + +type algorithmOpenSSHCertSigner struct { + *openSSHCertSigner + algorithmSigner AlgorithmSigner +} + +// NewCertSigner returns a Signer that signs with the given Certificate, whose +// private key is held by signer. It returns an error if the public key in cert +// doesn't match the key used by signer. +func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { + if !bytes.Equal(cert.Key.Marshal(), signer.PublicKey().Marshal()) { + return nil, errors.New("ssh: signer and cert have different public key") + } + + switch s := signer.(type) { + case MultiAlgorithmSigner: + return &multiAlgorithmSigner{ + AlgorithmSigner: &algorithmOpenSSHCertSigner{ + &openSSHCertSigner{cert, signer}, s}, + supportedAlgorithms: s.Algorithms(), + }, nil + case AlgorithmSigner: + return &algorithmOpenSSHCertSigner{ + &openSSHCertSigner{cert, signer}, s}, nil + default: + return &openSSHCertSigner{cert, signer}, nil + } +} + +func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + return s.signer.Sign(rand, data) +} + +func (s *openSSHCertSigner) PublicKey() PublicKey { + return s.pub +} + +func (s *algorithmOpenSSHCertSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + return s.algorithmSigner.SignWithAlgorithm(rand, data, algorithm) +} + +const sourceAddressCriticalOption = "source-address" + +// CertChecker does the work of verifying a certificate. Its methods +// can be plugged into ClientConfig.HostKeyCallback and +// ServerConfig.PublicKeyCallback. 
For the CertChecker to work, +// minimally, the IsAuthority callback should be set. +type CertChecker struct { + // SupportedCriticalOptions lists the CriticalOptions that the + // server application layer understands. These are only used + // for user certificates. + SupportedCriticalOptions []string + + // IsUserAuthority should return true if the key is recognized as an + // authority for the given user certificate. This allows for + // certificates to be signed by other certificates. This must be set + // if this CertChecker will be checking user certificates. + IsUserAuthority func(auth PublicKey) bool + + // IsHostAuthority should report whether the key is recognized as + // an authority for this host. This allows for certificates to be + // signed by other keys, and for those other keys to only be valid + // signers for particular hostnames. This must be set if this + // CertChecker will be checking host certificates. + IsHostAuthority func(auth PublicKey, address string) bool + + // Clock is used for verifying time stamps. If nil, time.Now + // is used. + Clock func() time.Time + + // UserKeyFallback is called when CertChecker.Authenticate encounters a + // public key that is not a certificate. It must implement validation + // of user keys or else, if nil, all such keys are rejected. + UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // HostKeyFallback is called when CertChecker.CheckHostKey encounters a + // public key that is not a certificate. It must implement host key + // validation or else, if nil, all such keys are rejected. + HostKeyFallback HostKeyCallback + + // IsRevoked is called for each certificate so that revocation checking + // can be implemented. It should return true if the given certificate + // is revoked and false otherwise. If nil, no certificates are + // considered to have been revoked. + IsRevoked func(cert *Certificate) bool +} + +// CheckHostKey checks a host key certificate. This method can be +// plugged into ClientConfig.HostKeyCallback. +func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { + cert, ok := key.(*Certificate) + if !ok { + if c.HostKeyFallback != nil { + return c.HostKeyFallback(addr, remote, key) + } + return errors.New("ssh: non-certificate host key") + } + if cert.CertType != HostCert { + return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) + } + if !c.IsHostAuthority(cert.SignatureKey, addr) { + return fmt.Errorf("ssh: no authorities for hostname: %v", addr) + } + + hostname, _, err := net.SplitHostPort(addr) + if err != nil { + return err + } + + // Pass hostname only as principal for host certificates (consistent with OpenSSH) + return c.CheckCert(hostname, cert) +} + +// Authenticate checks a user certificate. Authenticate can be used as +// a value for ServerConfig.PublicKeyCallback. 
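+//
+// A wiring sketch (editor's addition; the comparison against caPublicKey is
+// an assumed policy, not part of this package):
+//
+//	checker := &ssh.CertChecker{
+//		IsUserAuthority: func(auth ssh.PublicKey) bool {
+//			return bytes.Equal(auth.Marshal(), caPublicKey.Marshal())
+//		},
+//	}
+//	config := &ssh.ServerConfig{PublicKeyCallback: checker.Authenticate}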
+func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { + cert, ok := pubKey.(*Certificate) + if !ok { + if c.UserKeyFallback != nil { + return c.UserKeyFallback(conn, pubKey) + } + return nil, errors.New("ssh: normal key pairs not accepted") + } + + if cert.CertType != UserCert { + return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) + } + if !c.IsUserAuthority(cert.SignatureKey) { + return nil, fmt.Errorf("ssh: certificate signed by unrecognized authority") + } + + if err := c.CheckCert(conn.User(), cert); err != nil { + return nil, err + } + + return &cert.Permissions, nil +} + +// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and +// the signature of the certificate. +func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { + if c.IsRevoked != nil && c.IsRevoked(cert) { + return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial) + } + + for opt := range cert.CriticalOptions { + // sourceAddressCriticalOption will be enforced by + // serverAuthenticate + if opt == sourceAddressCriticalOption { + continue + } + + found := false + for _, supp := range c.SupportedCriticalOptions { + if supp == opt { + found = true + break + } + } + if !found { + return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) + } + } + + if len(cert.ValidPrincipals) > 0 { + // By default, certs are valid for all users/hosts. + found := false + for _, p := range cert.ValidPrincipals { + if p == principal { + found = true + break + } + } + if !found { + return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) + } + } + + clock := c.Clock + if clock == nil { + clock = time.Now + } + + unixNow := clock().Unix() + if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { + return fmt.Errorf("ssh: cert is not yet valid") + } + if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { + return fmt.Errorf("ssh: cert has expired") + } + if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { + return fmt.Errorf("ssh: certificate signature does not verify") + } + + return nil +} + +// SignCert signs the certificate with an authority, setting the Nonce, +// SignatureKey, and Signature fields. If the authority implements the +// MultiAlgorithmSigner interface the first algorithm in the list is used. This +// is useful if you want to sign with a specific algorithm. +func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { + c.Nonce = make([]byte, 32) + if _, err := io.ReadFull(rand, c.Nonce); err != nil { + return err + } + c.SignatureKey = authority.PublicKey() + + if v, ok := authority.(MultiAlgorithmSigner); ok { + if len(v.Algorithms()) == 0 { + return errors.New("the provided authority has no signature algorithm") + } + // Use the first algorithm in the list. + sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), v.Algorithms()[0]) + if err != nil { + return err + } + c.Signature = sig + return nil + } else if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA { + // Default to KeyAlgoRSASHA512 for ssh-rsa signers. + // TODO: consider using KeyAlgoRSASHA256 as default. 
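+		// Editor's note: callers that need a different RSA variant can
+		// instead pass an authority implementing MultiAlgorithmSigner
+		// whose Algorithms() list begins with the desired algorithm;
+		// that case is handled by the branch above.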
+ sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), KeyAlgoRSASHA512) + if err != nil { + return err + } + c.Signature = sig + return nil + } + + sig, err := authority.Sign(rand, c.bytesForSigning()) + if err != nil { + return err + } + c.Signature = sig + return nil +} + +// certKeyAlgoNames is a mapping from known certificate algorithm names to the +// corresponding public key signature algorithm. +// +// This map must be kept in sync with the one in agent/client.go. +var certKeyAlgoNames = map[string]string{ + CertAlgoRSAv01: KeyAlgoRSA, + CertAlgoRSASHA256v01: KeyAlgoRSASHA256, + CertAlgoRSASHA512v01: KeyAlgoRSASHA512, + CertAlgoDSAv01: KeyAlgoDSA, + CertAlgoECDSA256v01: KeyAlgoECDSA256, + CertAlgoECDSA384v01: KeyAlgoECDSA384, + CertAlgoECDSA521v01: KeyAlgoECDSA521, + CertAlgoSKECDSA256v01: KeyAlgoSKECDSA256, + CertAlgoED25519v01: KeyAlgoED25519, + CertAlgoSKED25519v01: KeyAlgoSKED25519, +} + +// underlyingAlgo returns the signature algorithm associated with algo (which is +// an advertised or negotiated public key or host key algorithm). These are +// usually the same, except for certificate algorithms. +func underlyingAlgo(algo string) string { + if a, ok := certKeyAlgoNames[algo]; ok { + return a + } + return algo +} + +// certificateAlgo returns the certificate algorithms that uses the provided +// underlying signature algorithm. +func certificateAlgo(algo string) (certAlgo string, ok bool) { + for certName, algoName := range certKeyAlgoNames { + if algoName == algo { + return certName, true + } + } + return "", false +} + +func (cert *Certificate) bytesForSigning() []byte { + c2 := *cert + c2.Signature = nil + out := c2.Marshal() + // Drop trailing signature length. + return out[:len(out)-4] +} + +// Marshal serializes c into OpenSSH's wire format. It is part of the +// PublicKey interface. +func (c *Certificate) Marshal() []byte { + generic := genericCertData{ + Serial: c.Serial, + CertType: c.CertType, + KeyId: c.KeyId, + ValidPrincipals: marshalStringList(c.ValidPrincipals), + ValidAfter: uint64(c.ValidAfter), + ValidBefore: uint64(c.ValidBefore), + CriticalOptions: marshalTuples(c.CriticalOptions), + Extensions: marshalTuples(c.Extensions), + Reserved: c.Reserved, + SignatureKey: c.SignatureKey.Marshal(), + } + if c.Signature != nil { + generic.Signature = Marshal(c.Signature) + } + genericBytes := Marshal(&generic) + keyBytes := c.Key.Marshal() + _, keyBytes, _ = parseString(keyBytes) + prefix := Marshal(&struct { + Name string + Nonce []byte + Key []byte `ssh:"rest"` + }{c.Type(), c.Nonce, keyBytes}) + + result := make([]byte, 0, len(prefix)+len(genericBytes)) + result = append(result, prefix...) + result = append(result, genericBytes...) + return result +} + +// Type returns the certificate algorithm name. It is part of the PublicKey interface. +func (c *Certificate) Type() string { + certName, ok := certificateAlgo(c.Key.Type()) + if !ok { + panic("unknown certificate type for key type " + c.Key.Type()) + } + return certName +} + +// Verify verifies a signature against the certificate's public +// key. It is part of the PublicKey interface. 
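+//
+// Editor's note: this verifies data against the certified key itself; it
+// does not validate the certificate. Use CertChecker.CheckCert (above) to
+// check the certificate's own signature, validity window, and principals.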
+func (c *Certificate) Verify(data []byte, sig *Signature) error { + return c.Key.Verify(data, sig) +} + +func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { + format, in, ok := parseString(in) + if !ok { + return + } + + out = &Signature{ + Format: string(format), + } + + if out.Blob, in, ok = parseString(in); !ok { + return + } + + switch out.Format { + case KeyAlgoSKECDSA256, CertAlgoSKECDSA256v01, KeyAlgoSKED25519, CertAlgoSKED25519v01: + out.Rest = in + return out, nil, ok + } + + return out, in, ok +} + +func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { + sigBytes, rest, ok := parseString(in) + if !ok { + return + } + + out, trailing, ok := parseSignatureBody(sigBytes) + if !ok || len(trailing) > 0 { + return nil, nil, false + } + return +} diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go new file mode 100644 index 000000000..cc0bb7ab6 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/channel.go @@ -0,0 +1,645 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "sync" +) + +const ( + minPacketLength = 9 + // channelMaxPacket contains the maximum number of bytes that will be + // sent in a single packet. As per RFC 4253, section 6.1, 32k is also + // the minimum. + channelMaxPacket = 1 << 15 + // We follow OpenSSH here. + channelWindowSize = 64 * channelMaxPacket +) + +// NewChannel represents an incoming request to a channel. It must either be +// accepted for use by calling Accept, or rejected by calling Reject. +type NewChannel interface { + // Accept accepts the channel creation request. It returns the Channel + // and a Go channel containing SSH requests. The Go channel must be + // serviced otherwise the Channel will hang. + Accept() (Channel, <-chan *Request, error) + + // Reject rejects the channel creation request. After calling + // this, no other methods on the Channel may be called. + Reject(reason RejectionReason, message string) error + + // ChannelType returns the type of the channel, as supplied by the + // client. + ChannelType() string + + // ExtraData returns the arbitrary payload for this channel, as supplied + // by the client. This data is specific to the channel type. + ExtraData() []byte +} + +// A Channel is an ordered, reliable, flow-controlled, duplex stream +// that is multiplexed over an SSH connection. +type Channel interface { + // Read reads up to len(data) bytes from the channel. + Read(data []byte) (int, error) + + // Write writes len(data) bytes to the channel. + Write(data []byte) (int, error) + + // Close signals end of channel use. No data may be sent after this + // call. + Close() error + + // CloseWrite signals the end of sending in-band + // data. Requests may still be sent, and the other side may + // still send data + CloseWrite() error + + // SendRequest sends a channel request. If wantReply is true, + // it will wait for a reply and return the result as a + // boolean, otherwise the return value will be false. Channel + // requests are out-of-band messages so they may be sent even + // if the data stream is closed or blocked by flow control. + // If the channel is closed before a reply is returned, io.EOF + // is returned. 
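+	//
+	// For example (editor's sketch), starting a shell on a session
+	// channel:
+	//
+	//	ok, err := ch.SendRequest("shell", true, nil)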
+ SendRequest(name string, wantReply bool, payload []byte) (bool, error) + + // Stderr returns an io.ReadWriter that writes to this channel + // with the extended data type set to stderr. Stderr may + // safely be read and written from a different goroutine than + // Read and Write respectively. + Stderr() io.ReadWriter +} + +// Request is a request sent outside of the normal stream of +// data. Requests can either be specific to an SSH channel, or they +// can be global. +type Request struct { + Type string + WantReply bool + Payload []byte + + ch *channel + mux *mux +} + +// Reply sends a response to a request. It must be called for all requests +// where WantReply is true and is a no-op otherwise. The payload argument is +// ignored for replies to channel-specific requests. +func (r *Request) Reply(ok bool, payload []byte) error { + if !r.WantReply { + return nil + } + + if r.ch == nil { + return r.mux.ackRequest(ok, payload) + } + + return r.ch.ackRequest(ok) +} + +// RejectionReason is an enumeration used when rejecting channel creation +// requests. See RFC 4254, section 5.1. +type RejectionReason uint32 + +const ( + Prohibited RejectionReason = iota + 1 + ConnectionFailed + UnknownChannelType + ResourceShortage +) + +// String converts the rejection reason to human readable form. +func (r RejectionReason) String() string { + switch r { + case Prohibited: + return "administratively prohibited" + case ConnectionFailed: + return "connect failed" + case UnknownChannelType: + return "unknown channel type" + case ResourceShortage: + return "resource shortage" + } + return fmt.Sprintf("unknown reason %d", int(r)) +} + +func min(a uint32, b int) uint32 { + if a < uint32(b) { + return a + } + return uint32(b) +} + +type channelDirection uint8 + +const ( + channelInbound channelDirection = iota + channelOutbound +) + +// channel is an implementation of the Channel interface that works +// with the mux class. +type channel struct { + // R/O after creation + chanType string + extraData []byte + localId, remoteId uint32 + + // maxIncomingPayload and maxRemotePayload are the maximum + // payload sizes of normal and extended data packets for + // receiving and sending, respectively. The wire packet will + // be 9 or 13 bytes larger (excluding encryption overhead). + maxIncomingPayload uint32 + maxRemotePayload uint32 + + mux *mux + + // decided is set to true if an accept or reject message has been sent + // (for outbound channels) or received (for inbound channels). + decided bool + + // direction contains either channelOutbound, for channels created + // locally, or channelInbound, for channels created by the peer. + direction channelDirection + + // Pending internal channel messages. + msg chan interface{} + + // Since requests have no ID, there can be only one request + // with WantReply=true outstanding. This lock is held by a + // goroutine that has such an outgoing request pending. + sentRequestMu sync.Mutex + + incomingRequests chan *Request + + sentEOF bool + + // thread-safe data + remoteWin window + pending *buffer + extPending *buffer + + // windowMu protects myWindow, the flow-control window, and myConsumed, + // the number of bytes consumed since we last increased myWindow + windowMu sync.Mutex + myWindow uint32 + myConsumed uint32 + + // writeMu serializes calls to mux.conn.writePacket() and + // protects sentClose and packetPool. This mutex must be + // different from windowMu, as writePacket can block if there + // is a key exchange pending. 
+ writeMu sync.Mutex + sentClose bool + + // packetPool has a buffer for each extended channel ID to + // save allocations during writes. + packetPool map[uint32][]byte +} + +// writePacket sends a packet. If the packet is a channel close, it updates +// sentClose. This method takes the lock c.writeMu. +func (ch *channel) writePacket(packet []byte) error { + ch.writeMu.Lock() + if ch.sentClose { + ch.writeMu.Unlock() + return io.EOF + } + ch.sentClose = (packet[0] == msgChannelClose) + err := ch.mux.conn.writePacket(packet) + ch.writeMu.Unlock() + return err +} + +func (ch *channel) sendMessage(msg interface{}) error { + if debugMux { + log.Printf("send(%d): %#v", ch.mux.chanList.offset, msg) + } + + p := Marshal(msg) + binary.BigEndian.PutUint32(p[1:], ch.remoteId) + return ch.writePacket(p) +} + +// WriteExtended writes data to a specific extended stream. These streams are +// used, for example, for stderr. +func (ch *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { + if ch.sentEOF { + return 0, io.EOF + } + // 1 byte message type, 4 bytes remoteId, 4 bytes data length + opCode := byte(msgChannelData) + headerLength := uint32(9) + if extendedCode > 0 { + headerLength += 4 + opCode = msgChannelExtendedData + } + + ch.writeMu.Lock() + packet := ch.packetPool[extendedCode] + // We don't remove the buffer from packetPool, so + // WriteExtended calls from different goroutines will be + // flagged as errors by the race detector. + ch.writeMu.Unlock() + + for len(data) > 0 { + space := min(ch.maxRemotePayload, len(data)) + if space, err = ch.remoteWin.reserve(space); err != nil { + return n, err + } + if want := headerLength + space; uint32(cap(packet)) < want { + packet = make([]byte, want) + } else { + packet = packet[:want] + } + + todo := data[:space] + + packet[0] = opCode + binary.BigEndian.PutUint32(packet[1:], ch.remoteId) + if extendedCode > 0 { + binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) + } + binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) + copy(packet[headerLength:], todo) + if err = ch.writePacket(packet); err != nil { + return n, err + } + + n += len(todo) + data = data[len(todo):] + } + + ch.writeMu.Lock() + ch.packetPool[extendedCode] = packet + ch.writeMu.Unlock() + + return n, err +} + +func (ch *channel) handleData(packet []byte) error { + headerLen := 9 + isExtendedData := packet[0] == msgChannelExtendedData + if isExtendedData { + headerLen = 13 + } + if len(packet) < headerLen { + // malformed data packet + return parseError(packet[0]) + } + + var extended uint32 + if isExtendedData { + extended = binary.BigEndian.Uint32(packet[5:]) + } + + length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) + if length == 0 { + return nil + } + if length > ch.maxIncomingPayload { + // TODO(hanwen): should send Disconnect? + return errors.New("ssh: incoming packet exceeds maximum payload size") + } + + data := packet[headerLen:] + if length != uint32(len(data)) { + return errors.New("ssh: wrong packet length") + } + + ch.windowMu.Lock() + if ch.myWindow < length { + ch.windowMu.Unlock() + // TODO(hanwen): should send Disconnect with reason? + return errors.New("ssh: remote side wrote too much") + } + ch.myWindow -= length + ch.windowMu.Unlock() + + if extended == 1 { + ch.extPending.write(data) + } else if extended > 0 { + // discard other extended data. 
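+		// Editor's note: extended code 1 above is
+		// SSH_EXTENDED_DATA_STDERR (RFC 4254, section 5.2); data with
+		// any other extended code is silently dropped here.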
+ } else { + ch.pending.write(data) + } + return nil +} + +func (c *channel) adjustWindow(adj uint32) error { + c.windowMu.Lock() + // Since myConsumed and myWindow are managed on our side, and can never + // exceed the initial window setting, we don't worry about overflow. + c.myConsumed += adj + var sendAdj uint32 + if (channelWindowSize-c.myWindow > 3*c.maxIncomingPayload) || + (c.myWindow < channelWindowSize/2) { + sendAdj = c.myConsumed + c.myConsumed = 0 + c.myWindow += sendAdj + } + c.windowMu.Unlock() + if sendAdj == 0 { + return nil + } + return c.sendMessage(windowAdjustMsg{ + AdditionalBytes: sendAdj, + }) +} + +func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { + switch extended { + case 1: + n, err = c.extPending.Read(data) + case 0: + n, err = c.pending.Read(data) + default: + return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) + } + + if n > 0 { + err = c.adjustWindow(uint32(n)) + // sendWindowAdjust can return io.EOF if the remote + // peer has closed the connection, however we want to + // defer forwarding io.EOF to the caller of Read until + // the buffer has been drained. + if n > 0 && err == io.EOF { + err = nil + } + } + + return n, err +} + +func (c *channel) close() { + c.pending.eof() + c.extPending.eof() + close(c.msg) + close(c.incomingRequests) + c.writeMu.Lock() + // This is not necessary for a normal channel teardown, but if + // there was another error, it is. + c.sentClose = true + c.writeMu.Unlock() + // Unblock writers. + c.remoteWin.close() +} + +// responseMessageReceived is called when a success or failure message is +// received on a channel to check that such a message is reasonable for the +// given channel. +func (ch *channel) responseMessageReceived() error { + if ch.direction == channelInbound { + return errors.New("ssh: channel response message received on inbound channel") + } + if ch.decided { + return errors.New("ssh: duplicate response received for channel") + } + ch.decided = true + return nil +} + +func (ch *channel) handlePacket(packet []byte) error { + switch packet[0] { + case msgChannelData, msgChannelExtendedData: + return ch.handleData(packet) + case msgChannelClose: + ch.sendMessage(channelCloseMsg{PeersID: ch.remoteId}) + ch.mux.chanList.remove(ch.localId) + ch.close() + return nil + case msgChannelEOF: + // RFC 4254 is mute on how EOF affects dataExt messages but + // it is logical to signal EOF at the same time. 
+ ch.extPending.eof() + ch.pending.eof() + return nil + } + + decoded, err := decode(packet) + if err != nil { + return err + } + + switch msg := decoded.(type) { + case *channelOpenFailureMsg: + if err := ch.responseMessageReceived(); err != nil { + return err + } + ch.mux.chanList.remove(msg.PeersID) + ch.msg <- msg + case *channelOpenConfirmMsg: + if err := ch.responseMessageReceived(); err != nil { + return err + } + if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { + return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) + } + ch.remoteId = msg.MyID + ch.maxRemotePayload = msg.MaxPacketSize + ch.remoteWin.add(msg.MyWindow) + ch.msg <- msg + case *windowAdjustMsg: + if !ch.remoteWin.add(msg.AdditionalBytes) { + return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) + } + case *channelRequestMsg: + req := Request{ + Type: msg.Request, + WantReply: msg.WantReply, + Payload: msg.RequestSpecificData, + ch: ch, + } + + ch.incomingRequests <- &req + default: + ch.msg <- msg + } + return nil +} + +func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { + ch := &channel{ + remoteWin: window{Cond: newCond()}, + myWindow: channelWindowSize, + pending: newBuffer(), + extPending: newBuffer(), + direction: direction, + incomingRequests: make(chan *Request, chanSize), + msg: make(chan interface{}, chanSize), + chanType: chanType, + extraData: extraData, + mux: m, + packetPool: make(map[uint32][]byte), + } + ch.localId = m.chanList.add(ch) + return ch +} + +var errUndecided = errors.New("ssh: must Accept or Reject channel") +var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") + +type extChannel struct { + code uint32 + ch *channel +} + +func (e *extChannel) Write(data []byte) (n int, err error) { + return e.ch.WriteExtended(data, e.code) +} + +func (e *extChannel) Read(data []byte) (n int, err error) { + return e.ch.ReadExtended(data, e.code) +} + +func (ch *channel) Accept() (Channel, <-chan *Request, error) { + if ch.decided { + return nil, nil, errDecidedAlready + } + ch.maxIncomingPayload = channelMaxPacket + confirm := channelOpenConfirmMsg{ + PeersID: ch.remoteId, + MyID: ch.localId, + MyWindow: ch.myWindow, + MaxPacketSize: ch.maxIncomingPayload, + } + ch.decided = true + if err := ch.sendMessage(confirm); err != nil { + return nil, nil, err + } + + return ch, ch.incomingRequests, nil +} + +func (ch *channel) Reject(reason RejectionReason, message string) error { + if ch.decided { + return errDecidedAlready + } + reject := channelOpenFailureMsg{ + PeersID: ch.remoteId, + Reason: reason, + Message: message, + Language: "en", + } + ch.decided = true + return ch.sendMessage(reject) +} + +func (ch *channel) Read(data []byte) (int, error) { + if !ch.decided { + return 0, errUndecided + } + return ch.ReadExtended(data, 0) +} + +func (ch *channel) Write(data []byte) (int, error) { + if !ch.decided { + return 0, errUndecided + } + return ch.WriteExtended(data, 0) +} + +func (ch *channel) CloseWrite() error { + if !ch.decided { + return errUndecided + } + ch.sentEOF = true + return ch.sendMessage(channelEOFMsg{ + PeersID: ch.remoteId}) +} + +func (ch *channel) Close() error { + if !ch.decided { + return errUndecided + } + + return ch.sendMessage(channelCloseMsg{ + PeersID: ch.remoteId}) +} + +// Extended returns an io.ReadWriter that sends and receives data on the given, +// SSH extended stream. Such streams are used, for example, for stderr. 
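+//
+// For example (editor's note), the Stderr method below is simply
+// Extended(1), extended code 1 being the stderr stream.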
+func (ch *channel) Extended(code uint32) io.ReadWriter { + if !ch.decided { + return nil + } + return &extChannel{code, ch} +} + +func (ch *channel) Stderr() io.ReadWriter { + return ch.Extended(1) +} + +func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { + if !ch.decided { + return false, errUndecided + } + + if wantReply { + ch.sentRequestMu.Lock() + defer ch.sentRequestMu.Unlock() + } + + msg := channelRequestMsg{ + PeersID: ch.remoteId, + Request: name, + WantReply: wantReply, + RequestSpecificData: payload, + } + + if err := ch.sendMessage(msg); err != nil { + return false, err + } + + if wantReply { + m, ok := (<-ch.msg) + if !ok { + return false, io.EOF + } + switch m.(type) { + case *channelRequestFailureMsg: + return false, nil + case *channelRequestSuccessMsg: + return true, nil + default: + return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) + } + } + + return false, nil +} + +// ackRequest either sends an ack or nack to the channel request. +func (ch *channel) ackRequest(ok bool) error { + if !ch.decided { + return errUndecided + } + + var msg interface{} + if !ok { + msg = channelRequestFailureMsg{ + PeersID: ch.remoteId, + } + } else { + msg = channelRequestSuccessMsg{ + PeersID: ch.remoteId, + } + } + return ch.sendMessage(msg) +} + +func (ch *channel) ChannelType() string { + return ch.chanType +} + +func (ch *channel) ExtraData() []byte { + return ch.extraData +} diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go new file mode 100644 index 000000000..741e984f3 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -0,0 +1,789 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rc4" + "crypto/subtle" + "encoding/binary" + "errors" + "fmt" + "hash" + "io" + + "golang.org/x/crypto/chacha20" + "golang.org/x/crypto/internal/poly1305" +) + +const ( + packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. + + // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations + // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC + // indicates implementations SHOULD be able to handle larger packet sizes, but then + // waffles on about reasonable limits. + // + // OpenSSH caps their maxPacket at 256kB so we choose to do + // the same. maxPacket is also used to ensure that uint32 + // length fields do not overflow, so it should remain well + // below 4G. + maxPacket = 256 * 1024 +) + +// noneCipher implements cipher.Stream and provides no encryption. It is used +// by the transport before the first key-exchange. 
+type noneCipher struct{} + +func (c noneCipher) XORKeyStream(dst, src []byte) { + copy(dst, src) +} + +func newAESCTR(key, iv []byte) (cipher.Stream, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + return cipher.NewCTR(c, iv), nil +} + +func newRC4(key, iv []byte) (cipher.Stream, error) { + return rc4.NewCipher(key) +} + +type cipherMode struct { + keySize int + ivSize int + create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) +} + +func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + stream, err := createFunc(key, iv) + if err != nil { + return nil, err + } + + var streamDump []byte + if skip > 0 { + streamDump = make([]byte, 512) + } + + for remainingToDump := skip; remainingToDump > 0; { + dumpThisTime := remainingToDump + if dumpThisTime > len(streamDump) { + dumpThisTime = len(streamDump) + } + stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) + remainingToDump -= dumpThisTime + } + + mac := macModes[algs.MAC].new(macKey) + return &streamPacketCipher{ + mac: mac, + etm: macModes[algs.MAC].etm, + macResult: make([]byte, mac.Size()), + cipher: stream, + }, nil + } +} + +// cipherModes documents properties of supported ciphers. Ciphers not included +// are not supported and will not be negotiated, even if explicitly requested in +// ClientConfig.Crypto.Ciphers. +var cipherModes = map[string]*cipherMode{ + // Ciphers from RFC 4344, which introduced many CTR-based ciphers. Algorithms + // are defined in the order specified in the RFC. + "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + + // Ciphers from RFC 4345, which introduces security-improved arcfour ciphers. + // They are defined in the order specified in the RFC. + "arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, + "arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, + + // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. + // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and + // RC4) has problems with weak keys, and should be used with caution." + // RFC 4345 introduces improved versions of Arcfour. + "arcfour": {16, 0, streamCipherMode(0, newRC4)}, + + // AEAD ciphers + gcm128CipherID: {16, 12, newGCMCipher}, + gcm256CipherID: {32, 12, newGCMCipher}, + chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, + + // CBC mode is insecure and so is not included in the default config. + // (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely + // needed, it's possible to specify a custom Config to enable it. + // You should expect that an active attacker can recover plaintext if + // you do. + aes128cbcID: {16, aes.BlockSize, newAESCBCCipher}, + + // 3des-cbc is insecure and is not included in the default + // config. + tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher}, +} + +// prefixLen is the length of the packet prefix that contains the packet length +// and number of padding bytes. +const prefixLen = 5 + +// streamPacketCipher is a packetCipher using a stream cipher. 
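+// On the wire each packet is laid out as a 4-byte length, a 1-byte padding
+// length, the payload and the padding, followed by the MAC when one is
+// configured (RFC 4253, section 6).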
+type streamPacketCipher struct { + mac hash.Hash + cipher cipher.Stream + etm bool + + // The following members are to avoid per-packet allocations. + prefix [prefixLen]byte + seqNumBytes [4]byte + padding [2 * packetSizeMultiple]byte + packetData []byte + macResult []byte +} + +// readCipherPacket reads and decrypt a single packet from the reader argument. +func (s *streamPacketCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + if _, err := io.ReadFull(r, s.prefix[:]); err != nil { + return nil, err + } + + var encryptedPaddingLength [1]byte + if s.mac != nil && s.etm { + copy(encryptedPaddingLength[:], s.prefix[4:5]) + s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) + } else { + s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) + } + + length := binary.BigEndian.Uint32(s.prefix[0:4]) + paddingLength := uint32(s.prefix[4]) + + var macSize uint32 + if s.mac != nil { + s.mac.Reset() + binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) + s.mac.Write(s.seqNumBytes[:]) + if s.etm { + s.mac.Write(s.prefix[:4]) + s.mac.Write(encryptedPaddingLength[:]) + } else { + s.mac.Write(s.prefix[:]) + } + macSize = uint32(s.mac.Size()) + } + + if length <= paddingLength+1 { + return nil, errors.New("ssh: invalid packet length, packet too small") + } + + if length > maxPacket { + return nil, errors.New("ssh: invalid packet length, packet too large") + } + + // the maxPacket check above ensures that length-1+macSize + // does not overflow. + if uint32(cap(s.packetData)) < length-1+macSize { + s.packetData = make([]byte, length-1+macSize) + } else { + s.packetData = s.packetData[:length-1+macSize] + } + + if _, err := io.ReadFull(r, s.packetData); err != nil { + return nil, err + } + mac := s.packetData[length-1:] + data := s.packetData[:length-1] + + if s.mac != nil && s.etm { + s.mac.Write(data) + } + + s.cipher.XORKeyStream(data, data) + + if s.mac != nil { + if !s.etm { + s.mac.Write(data) + } + s.macResult = s.mac.Sum(s.macResult[:0]) + if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { + return nil, errors.New("ssh: MAC failure") + } + } + + return s.packetData[:length-paddingLength-1], nil +} + +// writeCipherPacket encrypts and sends a packet of data to the writer argument +func (s *streamPacketCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + if len(packet) > maxPacket { + return errors.New("ssh: packet too large") + } + + aadlen := 0 + if s.mac != nil && s.etm { + // packet length is not encrypted for EtM modes + aadlen = 4 + } + + paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple + if paddingLength < 4 { + paddingLength += packetSizeMultiple + } + + length := len(packet) + 1 + paddingLength + binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) + s.prefix[4] = byte(paddingLength) + padding := s.padding[:paddingLength] + if _, err := io.ReadFull(rand, padding); err != nil { + return err + } + + if s.mac != nil { + s.mac.Reset() + binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) + s.mac.Write(s.seqNumBytes[:]) + + if s.etm { + // For EtM algorithms, the packet length must stay unencrypted, + // but the following data (padding length) must be encrypted + s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) + } + + s.mac.Write(s.prefix[:]) + + if !s.etm { + // For non-EtM algorithms, the algorithm is applied on unencrypted data + s.mac.Write(packet) + s.mac.Write(padding) + } + } + + if !(s.mac != nil && s.etm) { + // For EtM algorithms, the padding length has already been encrypted + 
// and the packet length must remain unencrypted + s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) + } + + s.cipher.XORKeyStream(packet, packet) + s.cipher.XORKeyStream(padding, padding) + + if s.mac != nil && s.etm { + // For EtM algorithms, packet and padding must be encrypted + s.mac.Write(packet) + s.mac.Write(padding) + } + + if _, err := w.Write(s.prefix[:]); err != nil { + return err + } + if _, err := w.Write(packet); err != nil { + return err + } + if _, err := w.Write(padding); err != nil { + return err + } + + if s.mac != nil { + s.macResult = s.mac.Sum(s.macResult[:0]) + if _, err := w.Write(s.macResult); err != nil { + return err + } + } + + return nil +} + +type gcmCipher struct { + aead cipher.AEAD + prefix [4]byte + iv []byte + buf []byte +} + +func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + aead, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + + return &gcmCipher{ + aead: aead, + iv: iv, + }, nil +} + +const gcmTagSize = 16 + +func (c *gcmCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + // Pad out to multiple of 16 bytes. This is different from the + // stream cipher because that encrypts the length too. + padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) + if padding < 4 { + padding += packetSizeMultiple + } + + length := uint32(len(packet) + int(padding) + 1) + binary.BigEndian.PutUint32(c.prefix[:], length) + if _, err := w.Write(c.prefix[:]); err != nil { + return err + } + + if cap(c.buf) < int(length) { + c.buf = make([]byte, length) + } else { + c.buf = c.buf[:length] + } + + c.buf[0] = padding + copy(c.buf[1:], packet) + if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { + return err + } + c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) + if _, err := w.Write(c.buf); err != nil { + return err + } + c.incIV() + + return nil +} + +func (c *gcmCipher) incIV() { + for i := 4 + 7; i >= 4; i-- { + c.iv[i]++ + if c.iv[i] != 0 { + break + } + } +} + +func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + if _, err := io.ReadFull(r, c.prefix[:]); err != nil { + return nil, err + } + length := binary.BigEndian.Uint32(c.prefix[:]) + if length > maxPacket { + return nil, errors.New("ssh: max packet length exceeded") + } + + if cap(c.buf) < int(length+gcmTagSize) { + c.buf = make([]byte, length+gcmTagSize) + } else { + c.buf = c.buf[:length+gcmTagSize] + } + + if _, err := io.ReadFull(r, c.buf); err != nil { + return nil, err + } + + plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) + if err != nil { + return nil, err + } + c.incIV() + + if len(plain) == 0 { + return nil, errors.New("ssh: empty packet") + } + + padding := plain[0] + if padding < 4 { + // padding is a byte, so it automatically satisfies + // the maximum size, which is 255. + return nil, fmt.Errorf("ssh: illegal padding %d", padding) + } + + if int(padding+1) >= len(plain) { + return nil, fmt.Errorf("ssh: padding %d too large", padding) + } + plain = plain[1 : length-uint32(padding)] + return plain, nil +} + +// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 +type cbcCipher struct { + mac hash.Hash + macSize uint32 + decrypter cipher.BlockMode + encrypter cipher.BlockMode + + // The following members are to avoid per-packet allocations. 
+ seqNumBytes [4]byte + packetData []byte + macResult []byte + + // Amount of data we should still read to hide which + // verification error triggered. + oracleCamouflage uint32 +} + +func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + cbc := &cbcCipher{ + mac: macModes[algs.MAC].new(macKey), + decrypter: cipher.NewCBCDecrypter(c, iv), + encrypter: cipher.NewCBCEncrypter(c, iv), + packetData: make([]byte, 1024), + } + if cbc.mac != nil { + cbc.macSize = uint32(cbc.mac.Size()) + } + + return cbc, nil +} + +func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, key, iv, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil +} + +func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := des.NewTripleDESCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, key, iv, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil +} + +func maxUInt32(a, b int) uint32 { + if a > b { + return uint32(a) + } + return uint32(b) +} + +const ( + cbcMinPacketSizeMultiple = 8 + cbcMinPacketSize = 16 + cbcMinPaddingSize = 4 +) + +// cbcError represents a verification error that may leak information. +type cbcError string + +func (e cbcError) Error() string { return string(e) } + +func (c *cbcCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + p, err := c.readCipherPacketLeaky(seqNum, r) + if err != nil { + if _, ok := err.(cbcError); ok { + // Verification error: read a fixed amount of + // data, to make distinguishing between + // failing MAC and failing length check more + // difficult. + io.CopyN(io.Discard, r, int64(c.oracleCamouflage)) + } + } + return p, err +} + +func (c *cbcCipher) readCipherPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { + blockSize := c.decrypter.BlockSize() + + // Read the header, which will include some of the subsequent data in the + // case of block ciphers - this is copied back to the payload later. + // How many bytes of payload/padding will be read with this first read. + firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) + firstBlock := c.packetData[:firstBlockLength] + if _, err := io.ReadFull(r, firstBlock); err != nil { + return nil, err + } + + c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength + + c.decrypter.CryptBlocks(firstBlock, firstBlock) + length := binary.BigEndian.Uint32(firstBlock[:4]) + if length > maxPacket { + return nil, cbcError("ssh: packet too large") + } + if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { + // The minimum size of a packet is 16 (or the cipher block size, whichever + // is larger) bytes. + return nil, cbcError("ssh: packet too small") + } + // The length of the packet (including the length field but not the MAC) must + // be a multiple of the block size or 8, whichever is larger. 
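+	// For example, with AES-CBC (block size 16) a length field of 28 gives
+	// 28+4 = 32, a multiple of 16, so the check below passes.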
+ if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { + return nil, cbcError("ssh: invalid packet length multiple") + } + + paddingLength := uint32(firstBlock[4]) + if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { + return nil, cbcError("ssh: invalid packet length") + } + + // Positions within the c.packetData buffer: + macStart := 4 + length + paddingStart := macStart - paddingLength + + // Entire packet size, starting before length, ending at end of mac. + entirePacketSize := macStart + c.macSize + + // Ensure c.packetData is large enough for the entire packet data. + if uint32(cap(c.packetData)) < entirePacketSize { + // Still need to upsize and copy, but this should be rare at runtime, only + // on upsizing the packetData buffer. + c.packetData = make([]byte, entirePacketSize) + copy(c.packetData, firstBlock) + } else { + c.packetData = c.packetData[:entirePacketSize] + } + + n, err := io.ReadFull(r, c.packetData[firstBlockLength:]) + if err != nil { + return nil, err + } + c.oracleCamouflage -= uint32(n) + + remainingCrypted := c.packetData[firstBlockLength:macStart] + c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) + + mac := c.packetData[macStart:] + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData[:macStart]) + c.macResult = c.mac.Sum(c.macResult[:0]) + if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { + return nil, cbcError("ssh: MAC failure") + } + } + + return c.packetData[prefixLen:paddingStart], nil +} + +func (c *cbcCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) + + // Length of encrypted portion of the packet (header, payload, padding). + // Enforce minimum padding and packet size. + encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) + // Enforce block size. + encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize + + length := encLength - 4 + paddingLength := int(length) - (1 + len(packet)) + + // Overall buffer contains: header, payload, padding, mac. + // Space for the MAC is reserved in the capacity but not the slice length. + bufferSize := encLength + c.macSize + if uint32(cap(c.packetData)) < bufferSize { + c.packetData = make([]byte, encLength, bufferSize) + } else { + c.packetData = c.packetData[:encLength] + } + + p := c.packetData + + // Packet header. + binary.BigEndian.PutUint32(p, length) + p = p[4:] + p[0] = byte(paddingLength) + + // Payload. + p = p[1:] + copy(p, packet) + + // Padding. + p = p[len(packet):] + if _, err := io.ReadFull(rand, p); err != nil { + return err + } + + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData) + // The MAC is now appended into the capacity reserved for it earlier. 
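+		// hash.Hash.Sum appends to its argument, so the MAC lands in the
+		// spare capacity reserved above without reallocating.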
+ c.packetData = c.mac.Sum(c.packetData) + } + + c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) + + if _, err := w.Write(c.packetData); err != nil { + return err + } + + return nil +} + +const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" + +// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com +// AEAD, which is described here: +// +// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 +// +// the methods here also implement padding, which RFC 4253 Section 6 +// also requires of stream ciphers. +type chacha20Poly1305Cipher struct { + lengthKey [32]byte + contentKey [32]byte + buf []byte +} + +func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { + if len(key) != 64 { + panic(len(key)) + } + + c := &chacha20Poly1305Cipher{ + buf: make([]byte, 256), + } + + copy(c.contentKey[:], key[:32]) + copy(c.lengthKey[:], key[32:]) + return c, nil +} + +func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + nonce := make([]byte, 12) + binary.BigEndian.PutUint32(nonce[8:], seqNum) + s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) + if err != nil { + return nil, err + } + var polyKey, discardBuf [32]byte + s.XORKeyStream(polyKey[:], polyKey[:]) + s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes + + encryptedLength := c.buf[:4] + if _, err := io.ReadFull(r, encryptedLength); err != nil { + return nil, err + } + + var lenBytes [4]byte + ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) + if err != nil { + return nil, err + } + ls.XORKeyStream(lenBytes[:], encryptedLength) + + length := binary.BigEndian.Uint32(lenBytes[:]) + if length > maxPacket { + return nil, errors.New("ssh: invalid packet length, packet too large") + } + + contentEnd := 4 + length + packetEnd := contentEnd + poly1305.TagSize + if uint32(cap(c.buf)) < packetEnd { + c.buf = make([]byte, packetEnd) + copy(c.buf[:], encryptedLength) + } else { + c.buf = c.buf[:packetEnd] + } + + if _, err := io.ReadFull(r, c.buf[4:packetEnd]); err != nil { + return nil, err + } + + var mac [poly1305.TagSize]byte + copy(mac[:], c.buf[contentEnd:packetEnd]) + if !poly1305.Verify(&mac, c.buf[:contentEnd], &polyKey) { + return nil, errors.New("ssh: MAC failure") + } + + plain := c.buf[4:contentEnd] + s.XORKeyStream(plain, plain) + + if len(plain) == 0 { + return nil, errors.New("ssh: empty packet") + } + + padding := plain[0] + if padding < 4 { + // padding is a byte, so it automatically satisfies + // the maximum size, which is 255. + return nil, fmt.Errorf("ssh: illegal padding %d", padding) + } + + if int(padding)+1 >= len(plain) { + return nil, fmt.Errorf("ssh: padding %d too large", padding) + } + + plain = plain[1 : len(plain)-int(padding)] + + return plain, nil +} + +func (c *chacha20Poly1305Cipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error { + nonce := make([]byte, 12) + binary.BigEndian.PutUint32(nonce[8:], seqNum) + s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) + if err != nil { + return err + } + var polyKey, discardBuf [32]byte + s.XORKeyStream(polyKey[:], polyKey[:]) + s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes + + // There is no blocksize, so fall back to multiple of 8 byte + // padding, as described in RFC 4253, Sec 6. 
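+	// For instance, a 10-byte payload yields 8-(1+10)%8 = 5 bytes of
+	// padding, already above the 4-byte minimum enforced below.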
+ const packetSizeMultiple = 8 + + padding := packetSizeMultiple - (1+len(payload))%packetSizeMultiple + if padding < 4 { + padding += packetSizeMultiple + } + + // size (4 bytes), padding (1), payload, padding, tag. + totalLength := 4 + 1 + len(payload) + padding + poly1305.TagSize + if cap(c.buf) < totalLength { + c.buf = make([]byte, totalLength) + } else { + c.buf = c.buf[:totalLength] + } + + binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding)) + ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) + if err != nil { + return err + } + ls.XORKeyStream(c.buf, c.buf[:4]) + c.buf[4] = byte(padding) + copy(c.buf[5:], payload) + packetEnd := 5 + len(payload) + padding + if _, err := io.ReadFull(rand, c.buf[5+len(payload):packetEnd]); err != nil { + return err + } + + s.XORKeyStream(c.buf[4:], c.buf[4:packetEnd]) + + var mac [poly1305.TagSize]byte + poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey) + + copy(c.buf[packetEnd:], mac[:]) + + if _, err := w.Write(c.buf); err != nil { + return err + } + return nil +} diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go new file mode 100644 index 000000000..fd8c49749 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -0,0 +1,282 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "net" + "os" + "sync" + "time" +) + +// Client implements a traditional SSH client that supports shells, +// subprocesses, TCP port/streamlocal forwarding and tunneled dialing. +type Client struct { + Conn + + handleForwardsOnce sync.Once // guards calling (*Client).handleForwards + + forwards forwardList // forwarded tcpip connections from the remote side + mu sync.Mutex + channelHandlers map[string]chan NewChannel +} + +// HandleChannelOpen returns a channel on which NewChannel requests +// for the given type are sent. If the type already is being handled, +// nil is returned. The channel is closed when the connection is closed. +func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { + c.mu.Lock() + defer c.mu.Unlock() + if c.channelHandlers == nil { + // The SSH channel has been closed. + c := make(chan NewChannel) + close(c) + return c + } + + ch := c.channelHandlers[channelType] + if ch != nil { + return nil + } + + ch = make(chan NewChannel, chanSize) + c.channelHandlers[channelType] = ch + return ch +} + +// NewClient creates a Client on top of the given connection. +func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { + conn := &Client{ + Conn: c, + channelHandlers: make(map[string]chan NewChannel, 1), + } + + go conn.handleGlobalRequests(reqs) + go conn.handleChannelOpens(chans) + go func() { + conn.Wait() + conn.forwards.closeAll() + }() + return conn +} + +// NewClientConn establishes an authenticated SSH connection using c +// as the underlying transport. The Request and NewChannel channels +// must be serviced or the connection will hang. 
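+//
+// A minimal sketch of the manual path that Dial otherwise performs (addr and
+// config are supplied by the caller):
+//
+//	conn, err := net.Dial("tcp", addr)
+//	if err != nil {
+//		// handle error
+//	}
+//	c, chans, reqs, err := ssh.NewClientConn(conn, addr, config)
+//	if err != nil {
+//		// handle error
+//	}
+//	client := ssh.NewClient(c, chans, reqs)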
+func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { + fullConf := *config + fullConf.SetDefaults() + if fullConf.HostKeyCallback == nil { + c.Close() + return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback") + } + + conn := &connection{ + sshConn: sshConn{conn: c, user: fullConf.User}, + } + + if err := conn.clientHandshake(addr, &fullConf); err != nil { + c.Close() + return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %w", err) + } + conn.mux = newMux(conn.transport) + return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil +} + +// clientHandshake performs the client side key exchange. See RFC 4253 Section +// 7. +func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { + if config.ClientVersion != "" { + c.clientVersion = []byte(config.ClientVersion) + } else { + c.clientVersion = []byte(packageVersion) + } + var err error + c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) + if err != nil { + return err + } + + c.transport = newClientTransport( + newTransport(c.sshConn.conn, config.Rand, true /* is client */), + c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) + if err := c.transport.waitSession(); err != nil { + return err + } + + c.sessionID = c.transport.getSessionID() + return c.clientAuthenticate(config) +} + +// verifyHostKeySignature verifies the host key obtained in the key exchange. +// algo is the negotiated algorithm, and may be a certificate type. +func verifyHostKeySignature(hostKey PublicKey, algo string, result *kexResult) error { + sig, rest, ok := parseSignatureBody(result.Signature) + if len(rest) > 0 || !ok { + return errors.New("ssh: signature parse error") + } + + if a := underlyingAlgo(algo); sig.Format != a { + return fmt.Errorf("ssh: invalid signature algorithm %q, expected %q", sig.Format, a) + } + + return hostKey.Verify(result.H, sig) +} + +// NewSession opens a new Session for this client. (A session is a remote +// execution of a program.) +func (c *Client) NewSession() (*Session, error) { + ch, in, err := c.OpenChannel("session", nil) + if err != nil { + return nil, err + } + return newSession(ch, in) +} + +func (c *Client) handleGlobalRequests(incoming <-chan *Request) { + for r := range incoming { + // This handles keepalive messages and matches + // the behaviour of OpenSSH. + r.Reply(false, nil) + } +} + +// handleChannelOpens channel open messages from the remote side. +func (c *Client) handleChannelOpens(in <-chan NewChannel) { + for ch := range in { + c.mu.Lock() + handler := c.channelHandlers[ch.ChannelType()] + c.mu.Unlock() + + if handler != nil { + handler <- ch + } else { + ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType())) + } + } + + c.mu.Lock() + for _, ch := range c.channelHandlers { + close(ch) + } + c.channelHandlers = nil + c.mu.Unlock() +} + +// Dial starts a client connection to the given SSH server. It is a +// convenience function that connects to the given network address, +// initiates the SSH handshake, and then sets up a Client. For access +// to incoming channels and requests, use net.Dial with NewClientConn +// instead. 
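+//
+// An illustrative invocation; user, password and the address are
+// placeholders, and InsecureIgnoreHostKey should be replaced with real host
+// key verification in production:
+//
+//	config := &ssh.ClientConfig{
+//		User:            "user",
+//		Auth:            []ssh.AuthMethod{ssh.Password("password")},
+//		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
+//	}
+//	client, err := ssh.Dial("tcp", "host.example.com:22", config)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer client.Close()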
+func Dial(network, addr string, config *ClientConfig) (*Client, error) { + conn, err := net.DialTimeout(network, addr, config.Timeout) + if err != nil { + return nil, err + } + c, chans, reqs, err := NewClientConn(conn, addr, config) + if err != nil { + return nil, err + } + return NewClient(c, chans, reqs), nil +} + +// HostKeyCallback is the function type used for verifying server +// keys. A HostKeyCallback must return nil if the host key is OK, or +// an error to reject it. It receives the hostname as passed to Dial +// or NewClientConn. The remote address is the RemoteAddr of the +// net.Conn underlying the SSH connection. +type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error + +// BannerCallback is the function type used for treat the banner sent by +// the server. A BannerCallback receives the message sent by the remote server. +type BannerCallback func(message string) error + +// A ClientConfig structure is used to configure a Client. It must not be +// modified after having been passed to an SSH function. +type ClientConfig struct { + // Config contains configuration that is shared between clients and + // servers. + Config + + // User contains the username to authenticate as. + User string + + // Auth contains possible authentication methods to use with the + // server. Only the first instance of a particular RFC 4252 method will + // be used during authentication. + Auth []AuthMethod + + // HostKeyCallback is called during the cryptographic + // handshake to validate the server's host key. The client + // configuration must supply this callback for the connection + // to succeed. The functions InsecureIgnoreHostKey or + // FixedHostKey can be used for simplistic host key checks. + HostKeyCallback HostKeyCallback + + // BannerCallback is called during the SSH dance to display a custom + // server's message. The client configuration can supply this callback to + // handle it as wished. The function BannerDisplayStderr can be used for + // simplistic display on Stderr. + BannerCallback BannerCallback + + // ClientVersion contains the version identification string that will + // be used for the connection. If empty, a reasonable default is used. + ClientVersion string + + // HostKeyAlgorithms lists the public key algorithms that the client will + // accept from the server for host key authentication, in order of + // preference. If empty, a reasonable default is used. Any + // string returned from a PublicKey.Type method may be used, or + // any of the CertAlgo and KeyAlgo constants. + HostKeyAlgorithms []string + + // Timeout is the maximum amount of time for the TCP connection to establish. + // + // A Timeout of zero means no timeout. + Timeout time.Duration +} + +// InsecureIgnoreHostKey returns a function that can be used for +// ClientConfig.HostKeyCallback to accept any host key. It should +// not be used for production code. +func InsecureIgnoreHostKey() HostKeyCallback { + return func(hostname string, remote net.Addr, key PublicKey) error { + return nil + } +} + +type fixedHostKey struct { + key PublicKey +} + +func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error { + if f.key == nil { + return fmt.Errorf("ssh: required host key was nil") + } + if !bytes.Equal(key.Marshal(), f.key.Marshal()) { + return fmt.Errorf("ssh: host key mismatch") + } + return nil +} + +// FixedHostKey returns a function for use in +// ClientConfig.HostKeyCallback to accept only a specific host key. 
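+//
+// A short sketch, assuming knownHostLine holds an authorized_keys-format
+// public key for the server:
+//
+//	hostKey, _, _, _, err := ssh.ParseAuthorizedKey(knownHostLine)
+//	if err != nil {
+//		// handle error
+//	}
+//	config.HostKeyCallback = ssh.FixedHostKey(hostKey)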
+func FixedHostKey(key PublicKey) HostKeyCallback { + hk := &fixedHostKey{key} + return hk.check +} + +// BannerDisplayStderr returns a function that can be used for +// ClientConfig.BannerCallback to display banners on os.Stderr. +func BannerDisplayStderr() BannerCallback { + return func(banner string) error { + _, err := os.Stderr.WriteString(banner) + + return err + } +} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go new file mode 100644 index 000000000..b93961010 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -0,0 +1,791 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "strings" +) + +type authResult int + +const ( + authFailure authResult = iota + authPartialSuccess + authSuccess +) + +// clientAuthenticate authenticates with the remote server. See RFC 4252. +func (c *connection) clientAuthenticate(config *ClientConfig) error { + // initiate user auth session + if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { + return err + } + packet, err := c.transport.readPacket() + if err != nil { + return err + } + // The server may choose to send a SSH_MSG_EXT_INFO at this point (if we + // advertised willingness to receive one, which we always do) or not. See + // RFC 8308, Section 2.4. + extensions := make(map[string][]byte) + if len(packet) > 0 && packet[0] == msgExtInfo { + var extInfo extInfoMsg + if err := Unmarshal(packet, &extInfo); err != nil { + return err + } + payload := extInfo.Payload + for i := uint32(0); i < extInfo.NumExtensions; i++ { + name, rest, ok := parseString(payload) + if !ok { + return parseError(msgExtInfo) + } + value, rest, ok := parseString(rest) + if !ok { + return parseError(msgExtInfo) + } + extensions[string(name)] = value + payload = rest + } + packet, err = c.transport.readPacket() + if err != nil { + return err + } + } + var serviceAccept serviceAcceptMsg + if err := Unmarshal(packet, &serviceAccept); err != nil { + return err + } + + // during the authentication phase the client first attempts the "none" method + // then any untried methods suggested by the server. + var tried []string + var lastMethods []string + + sessionID := c.transport.getSessionID() + for auth := AuthMethod(new(noneAuth)); auth != nil; { + ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions) + if err != nil { + // On disconnect, return error immediately + if _, ok := err.(*disconnectMsg); ok { + return err + } + // We return the error later if there is no other method left to + // try. + ok = authFailure + } + if ok == authSuccess { + // success + return nil + } else if ok == authFailure { + if m := auth.method(); !contains(tried, m) { + tried = append(tried, m) + } + } + if methods == nil { + methods = lastMethods + } + lastMethods = methods + + auth = nil + + findNext: + for _, a := range config.Auth { + candidateMethod := a.method() + if contains(tried, candidateMethod) { + continue + } + for _, meth := range methods { + if meth == candidateMethod { + auth = a + break findNext + } + } + } + + if auth == nil && err != nil { + // We have an error and there are no other authentication methods to + // try, so we return it. 
+ return err + } + } + return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried) +} + +func contains(list []string, e string) bool { + for _, s := range list { + if s == e { + return true + } + } + return false +} + +// An AuthMethod represents an instance of an RFC 4252 authentication method. +type AuthMethod interface { + // auth authenticates user over transport t. + // Returns true if authentication is successful. + // If authentication is not successful, a []string of alternative + // method names is returned. If the slice is nil, it will be ignored + // and the previous set of possible methods will be reused. + auth(session []byte, user string, p packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) + + // method returns the RFC 4252 method name. + method() string +} + +// "none" authentication, RFC 4252 section 5.2. +type noneAuth int + +func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { + if err := c.writePacket(Marshal(&userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: "none", + })); err != nil { + return authFailure, nil, err + } + + return handleAuthResponse(c) +} + +func (n *noneAuth) method() string { + return "none" +} + +// passwordCallback is an AuthMethod that fetches the password through +// a function call, e.g. by prompting the user. +type passwordCallback func() (password string, err error) + +func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { + type passwordAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + Reply bool + Password string + } + + pw, err := cb() + // REVIEW NOTE: is there a need to support skipping a password attempt? + // The program may only find out that the user doesn't have a password + // when prompting. + if err != nil { + return authFailure, nil, err + } + + if err := c.writePacket(Marshal(&passwordAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + Reply: false, + Password: pw, + })); err != nil { + return authFailure, nil, err + } + + return handleAuthResponse(c) +} + +func (cb passwordCallback) method() string { + return "password" +} + +// Password returns an AuthMethod using the given password. +func Password(secret string) AuthMethod { + return passwordCallback(func() (string, error) { return secret, nil }) +} + +// PasswordCallback returns an AuthMethod that uses a callback for +// fetching a password. +func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { + return passwordCallback(prompt) +} + +type publickeyAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + // HasSig indicates to the receiver packet that the auth request is signed and + // should be used for authentication of the request. + HasSig bool + Algoname string + PubKey []byte + // Sig is tagged with "rest" so Marshal will exclude it during + // validateKey + Sig []byte `ssh:"rest"` +} + +// publicKeyCallback is an AuthMethod that uses a set of key +// pairs for authentication. 
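+// It is normally constructed through PublicKeys or PublicKeysCallback; a
+// minimal sketch, assuming pemBytes holds a PEM-encoded private key:
+//
+//	signer, err := ssh.ParsePrivateKey(pemBytes)
+//	if err != nil {
+//		// handle error
+//	}
+//	config.Auth = []ssh.AuthMethod{ssh.PublicKeys(signer)}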
+type publicKeyCallback func() ([]Signer, error) + +func (cb publicKeyCallback) method() string { + return "publickey" +} + +func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiAlgorithmSigner, string, error) { + var as MultiAlgorithmSigner + keyFormat := signer.PublicKey().Type() + + // If the signer implements MultiAlgorithmSigner we use the algorithms it + // support, if it implements AlgorithmSigner we assume it supports all + // algorithms, otherwise only the key format one. + switch s := signer.(type) { + case MultiAlgorithmSigner: + as = s + case AlgorithmSigner: + as = &multiAlgorithmSigner{ + AlgorithmSigner: s, + supportedAlgorithms: algorithmsForKeyFormat(underlyingAlgo(keyFormat)), + } + default: + as = &multiAlgorithmSigner{ + AlgorithmSigner: algorithmSignerWrapper{signer}, + supportedAlgorithms: []string{underlyingAlgo(keyFormat)}, + } + } + + getFallbackAlgo := func() (string, error) { + // Fallback to use if there is no "server-sig-algs" extension or a + // common algorithm cannot be found. We use the public key format if the + // MultiAlgorithmSigner supports it, otherwise we return an error. + if !contains(as.Algorithms(), underlyingAlgo(keyFormat)) { + return "", fmt.Errorf("ssh: no common public key signature algorithm, server only supports %q for key type %q, signer only supports %v", + underlyingAlgo(keyFormat), keyFormat, as.Algorithms()) + } + return keyFormat, nil + } + + extPayload, ok := extensions["server-sig-algs"] + if !ok { + // If there is no "server-sig-algs" extension use the fallback + // algorithm. + algo, err := getFallbackAlgo() + return as, algo, err + } + + // The server-sig-algs extension only carries underlying signature + // algorithm, but we are trying to select a protocol-level public key + // algorithm, which might be a certificate type. Extend the list of server + // supported algorithms to include the corresponding certificate algorithms. + serverAlgos := strings.Split(string(extPayload), ",") + for _, algo := range serverAlgos { + if certAlgo, ok := certificateAlgo(algo); ok { + serverAlgos = append(serverAlgos, certAlgo) + } + } + + // Filter algorithms based on those supported by MultiAlgorithmSigner. + var keyAlgos []string + for _, algo := range algorithmsForKeyFormat(keyFormat) { + if contains(as.Algorithms(), underlyingAlgo(algo)) { + keyAlgos = append(keyAlgos, algo) + } + } + + algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos) + if err != nil { + // If there is no overlap, return the fallback algorithm to support + // servers that fail to list all supported algorithms. + algo, err := getFallbackAlgo() + return as, algo, err + } + return as, algo, nil +} + +func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) { + // Authentication is performed by sending an enquiry to test if a key is + // acceptable to the remote. If the key is acceptable, the client will + // attempt to authenticate with the valid key. If not the client will repeat + // the process with the remaining keys. 
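+	// (The enquiry is a publickey request with HasSig set to false; the
+	// server answers SSH_MSG_USERAUTH_PK_OK if the key would be accepted.)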
+ + signers, err := cb() + if err != nil { + return authFailure, nil, err + } + var methods []string + var errSigAlgo error + + origSignersLen := len(signers) + for idx := 0; idx < len(signers); idx++ { + signer := signers[idx] + pub := signer.PublicKey() + as, algo, err := pickSignatureAlgorithm(signer, extensions) + if err != nil && errSigAlgo == nil { + // If we cannot negotiate a signature algorithm store the first + // error so we can return it to provide a more meaningful message if + // no other signers work. + errSigAlgo = err + continue + } + ok, err := validateKey(pub, algo, user, c) + if err != nil { + return authFailure, nil, err + } + // OpenSSH 7.2-7.7 advertises support for rsa-sha2-256 and rsa-sha2-512 + // in the "server-sig-algs" extension but doesn't support these + // algorithms for certificate authentication, so if the server rejects + // the key try to use the obtained algorithm as if "server-sig-algs" had + // not been implemented if supported from the algorithm signer. + if !ok && idx < origSignersLen && isRSACert(algo) && algo != CertAlgoRSAv01 { + if contains(as.Algorithms(), KeyAlgoRSA) { + // We retry using the compat algorithm after all signers have + // been tried normally. + signers = append(signers, &multiAlgorithmSigner{ + AlgorithmSigner: as, + supportedAlgorithms: []string{KeyAlgoRSA}, + }) + } + } + if !ok { + continue + } + + pubKey := pub.Marshal() + data := buildDataSignedForAuth(session, userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + }, algo, pubKey) + sign, err := as.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) + if err != nil { + return authFailure, nil, err + } + + // manually wrap the serialized signature in a string + s := Marshal(sign) + sig := make([]byte, stringLength(len(s))) + marshalString(sig, s) + msg := publickeyAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + HasSig: true, + Algoname: algo, + PubKey: pubKey, + Sig: sig, + } + p := Marshal(&msg) + if err := c.writePacket(p); err != nil { + return authFailure, nil, err + } + var success authResult + success, methods, err = handleAuthResponse(c) + if err != nil { + return authFailure, nil, err + } + + // If authentication succeeds or the list of available methods does not + // contain the "publickey" method, do not attempt to authenticate with any + // other keys. According to RFC 4252 Section 7, the latter can occur when + // additional authentication methods are required. + if success == authSuccess || !contains(methods, cb.method()) { + return success, methods, err + } + } + + return authFailure, methods, errSigAlgo +} + +// validateKey validates the key provided is acceptable to the server. 
+func validateKey(key PublicKey, algo string, user string, c packetConn) (bool, error) { + pubKey := key.Marshal() + msg := publickeyAuthMsg{ + User: user, + Service: serviceSSH, + Method: "publickey", + HasSig: false, + Algoname: algo, + PubKey: pubKey, + } + if err := c.writePacket(Marshal(&msg)); err != nil { + return false, err + } + + return confirmKeyAck(key, c) +} + +func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { + pubKey := key.Marshal() + + for { + packet, err := c.readPacket() + if err != nil { + return false, err + } + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return false, err + } + case msgUserAuthPubKeyOk: + var msg userAuthPubKeyOkMsg + if err := Unmarshal(packet, &msg); err != nil { + return false, err + } + // According to RFC 4252 Section 7 the algorithm in + // SSH_MSG_USERAUTH_PK_OK should match that of the request but some + // servers send the key type instead. OpenSSH allows any algorithm + // that matches the public key, so we do the same. + // https://github.com/openssh/openssh-portable/blob/86bdd385/sshconnect2.c#L709 + if !contains(algorithmsForKeyFormat(key.Type()), msg.Algo) { + return false, nil + } + if !bytes.Equal(msg.PubKey, pubKey) { + return false, nil + } + return true, nil + case msgUserAuthFailure: + return false, nil + default: + return false, unexpectedMessageError(msgUserAuthPubKeyOk, packet[0]) + } + } +} + +// PublicKeys returns an AuthMethod that uses the given key +// pairs. +func PublicKeys(signers ...Signer) AuthMethod { + return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) +} + +// PublicKeysCallback returns an AuthMethod that runs the given +// function to obtain a list of key pairs. +func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { + return publicKeyCallback(getSigners) +} + +// handleAuthResponse returns whether the preceding authentication request succeeded +// along with a list of remaining authentication methods to try next and +// an error if an unexpected response was received. +func handleAuthResponse(c packetConn) (authResult, []string, error) { + gotMsgExtInfo := false + for { + packet, err := c.readPacket() + if err != nil { + return authFailure, nil, err + } + + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return authFailure, nil, err + } + case msgExtInfo: + // Ignore post-authentication RFC 8308 extensions, once. + if gotMsgExtInfo { + return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + gotMsgExtInfo = true + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + if msg.PartialSuccess { + return authPartialSuccess, msg.Methods, nil + } + return authFailure, msg.Methods, nil + case msgUserAuthSuccess: + return authSuccess, nil, nil + default: + return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + } +} + +func handleBannerResponse(c packetConn, packet []byte) error { + var msg userAuthBannerMsg + if err := Unmarshal(packet, &msg); err != nil { + return err + } + + transport, ok := c.(*handshakeTransport) + if !ok { + return nil + } + + if transport.bannerCallback != nil { + return transport.bannerCallback(msg.Message) + } + + return nil +} + +// KeyboardInteractiveChallenge should print questions, optionally +// disabling echoing (e.g. for passwords), and return all the answers. 
+// Challenge may be called multiple times in a single session. After +// successful authentication, the server may send a challenge with no +// questions, for which the name and instruction messages should be +// printed. RFC 4256 section 3.3 details how the UI should behave for +// both CLI and GUI environments. +type KeyboardInteractiveChallenge func(name, instruction string, questions []string, echos []bool) (answers []string, err error) + +// KeyboardInteractive returns an AuthMethod using a prompt/response +// sequence controlled by the server. +func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod { + return challenge +} + +func (cb KeyboardInteractiveChallenge) method() string { + return "keyboard-interactive" +} + +func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { + type initiateMsg struct { + User string `sshtype:"50"` + Service string + Method string + Language string + Submethods string + } + + if err := c.writePacket(Marshal(&initiateMsg{ + User: user, + Service: serviceSSH, + Method: "keyboard-interactive", + })); err != nil { + return authFailure, nil, err + } + + gotMsgExtInfo := false + for { + packet, err := c.readPacket() + if err != nil { + return authFailure, nil, err + } + + // like handleAuthResponse, but with less options. + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return authFailure, nil, err + } + continue + case msgExtInfo: + // Ignore post-authentication RFC 8308 extensions, once. + if gotMsgExtInfo { + return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) + } + gotMsgExtInfo = true + continue + case msgUserAuthInfoRequest: + // OK + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + if msg.PartialSuccess { + return authPartialSuccess, msg.Methods, nil + } + return authFailure, msg.Methods, nil + case msgUserAuthSuccess: + return authSuccess, nil, nil + default: + return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) + } + + var msg userAuthInfoRequestMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + + // Manually unpack the prompt/echo pairs. 
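+		// On the wire each pair is a length-prefixed prompt string followed
+		// by one echo byte; non-zero means the reply may be echoed.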
+ rest := msg.Prompts + var prompts []string + var echos []bool + for i := 0; i < int(msg.NumPrompts); i++ { + prompt, r, ok := parseString(rest) + if !ok || len(r) == 0 { + return authFailure, nil, errors.New("ssh: prompt format error") + } + prompts = append(prompts, string(prompt)) + echos = append(echos, r[0] != 0) + rest = r[1:] + } + + if len(rest) != 0 { + return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs") + } + + answers, err := cb(msg.Name, msg.Instruction, prompts, echos) + if err != nil { + return authFailure, nil, err + } + + if len(answers) != len(prompts) { + return authFailure, nil, fmt.Errorf("ssh: incorrect number of answers from keyboard-interactive callback %d (expected %d)", len(answers), len(prompts)) + } + responseLength := 1 + 4 + for _, a := range answers { + responseLength += stringLength(len(a)) + } + serialized := make([]byte, responseLength) + p := serialized + p[0] = msgUserAuthInfoResponse + p = p[1:] + p = marshalUint32(p, uint32(len(answers))) + for _, a := range answers { + p = marshalString(p, []byte(a)) + } + + if err := c.writePacket(serialized); err != nil { + return authFailure, nil, err + } + } +} + +type retryableAuthMethod struct { + authMethod AuthMethod + maxTries int +} + +func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (ok authResult, methods []string, err error) { + for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { + ok, methods, err = r.authMethod.auth(session, user, c, rand, extensions) + if ok != authFailure || err != nil { // either success, partial success or error terminate + return ok, methods, err + } + } + return ok, methods, err +} + +func (r *retryableAuthMethod) method() string { + return r.authMethod.method() +} + +// RetryableAuthMethod is a decorator for other auth methods enabling them to +// be retried up to maxTries before considering that AuthMethod itself failed. +// If maxTries is <= 0, will retry indefinitely +// +// This is useful for interactive clients using challenge/response type +// authentication (e.g. Keyboard-Interactive, Password, etc) where the user +// could mistype their response resulting in the server issuing a +// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4 +// [keyboard-interactive]); Without this decorator, the non-retryable +// AuthMethod would be removed from future consideration, and never tried again +// (and so the user would never be able to retry their entry). +func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod { + return &retryableAuthMethod{authMethod: auth, maxTries: maxTries} +} + +// GSSAPIWithMICAuthMethod is an AuthMethod with "gssapi-with-mic" authentication. +// See RFC 4462 section 3 +// gssAPIClient is implementation of the GSSAPIClient interface, see the definition of the interface for details. +// target is the server host you want to log in to. 
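+//
+// An illustrative wiring, assuming myGSSClient implements GSSAPIClient:
+//
+//	config.Auth = []ssh.AuthMethod{
+//		ssh.GSSAPIWithMICAuthMethod(myGSSClient, "server.example.com"),
+//	}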
+func GSSAPIWithMICAuthMethod(gssAPIClient GSSAPIClient, target string) AuthMethod { + if gssAPIClient == nil { + panic("gss-api client must be not nil with enable gssapi-with-mic") + } + return &gssAPIWithMICCallback{gssAPIClient: gssAPIClient, target: target} +} + +type gssAPIWithMICCallback struct { + gssAPIClient GSSAPIClient + target string +} + +func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { + m := &userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: g.method(), + } + // The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST. + // See RFC 4462 section 3.2. + m.Payload = appendU32(m.Payload, 1) + m.Payload = appendString(m.Payload, string(krb5OID)) + if err := c.writePacket(Marshal(m)); err != nil { + return authFailure, nil, err + } + // The server responds to the SSH_MSG_USERAUTH_REQUEST with either an + // SSH_MSG_USERAUTH_FAILURE if none of the mechanisms are supported or + // with an SSH_MSG_USERAUTH_GSSAPI_RESPONSE. + // See RFC 4462 section 3.3. + // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication,so I don't want to check + // selected mech if it is valid. + packet, err := c.readPacket() + if err != nil { + return authFailure, nil, err + } + userAuthGSSAPIResp := &userAuthGSSAPIResponse{} + if err := Unmarshal(packet, userAuthGSSAPIResp); err != nil { + return authFailure, nil, err + } + // Start the loop into the exchange token. + // See RFC 4462 section 3.4. + var token []byte + defer g.gssAPIClient.DeleteSecContext() + for { + // Initiates the establishment of a security context between the application and a remote peer. + nextToken, needContinue, err := g.gssAPIClient.InitSecContext("host@"+g.target, token, false) + if err != nil { + return authFailure, nil, err + } + if len(nextToken) > 0 { + if err := c.writePacket(Marshal(&userAuthGSSAPIToken{ + Token: nextToken, + })); err != nil { + return authFailure, nil, err + } + } + if !needContinue { + break + } + packet, err = c.readPacket() + if err != nil { + return authFailure, nil, err + } + switch packet[0] { + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + if msg.PartialSuccess { + return authPartialSuccess, msg.Methods, nil + } + return authFailure, msg.Methods, nil + case msgUserAuthGSSAPIError: + userAuthGSSAPIErrorResp := &userAuthGSSAPIError{} + if err := Unmarshal(packet, userAuthGSSAPIErrorResp); err != nil { + return authFailure, nil, err + } + return authFailure, nil, fmt.Errorf("GSS-API Error:\n"+ + "Major Status: %d\n"+ + "Minor Status: %d\n"+ + "Error Message: %s\n", userAuthGSSAPIErrorResp.MajorStatus, userAuthGSSAPIErrorResp.MinorStatus, + userAuthGSSAPIErrorResp.Message) + case msgUserAuthGSSAPIToken: + userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} + if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { + return authFailure, nil, err + } + token = userAuthGSSAPITokenReq.Token + } + } + // Binding Encryption Keys. + // See RFC 4462 section 3.5. 
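+	// The MIC covers the session identifier plus the user, service and
+	// method names, binding the GSS-API context to this SSH session.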
+ micField := buildMIC(string(session), user, "ssh-connection", "gssapi-with-mic") + micToken, err := g.gssAPIClient.GetMIC(micField) + if err != nil { + return authFailure, nil, err + } + if err := c.writePacket(Marshal(&userAuthGSSAPIMIC{ + MIC: micToken, + })); err != nil { + return authFailure, nil, err + } + return handleAuthResponse(c) +} + +func (g *gssAPIWithMICCallback) method() string { + return "gssapi-with-mic" +} diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go new file mode 100644 index 000000000..7e9c2cbc6 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -0,0 +1,476 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto" + "crypto/rand" + "fmt" + "io" + "math" + "sync" + + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" +) + +// These are string constants in the SSH protocol. +const ( + compressionNone = "none" + serviceUserAuth = "ssh-userauth" + serviceSSH = "ssh-connection" +) + +// supportedCiphers lists ciphers we support but might not recommend. +var supportedCiphers = []string{ + "aes128-ctr", "aes192-ctr", "aes256-ctr", + "aes128-gcm@openssh.com", gcm256CipherID, + chacha20Poly1305ID, + "arcfour256", "arcfour128", "arcfour", + aes128cbcID, + tripledescbcID, +} + +// preferredCiphers specifies the default preference for ciphers. +var preferredCiphers = []string{ + "aes128-gcm@openssh.com", gcm256CipherID, + chacha20Poly1305ID, + "aes128-ctr", "aes192-ctr", "aes256-ctr", +} + +// supportedKexAlgos specifies the supported key-exchange algorithms in +// preference order. +var supportedKexAlgos = []string{ + kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, + // P384 and P521 are not constant-time yet, but since we don't + // reuse ephemeral keys, using them for ECDH should be OK. + kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, + kexAlgoDH14SHA256, kexAlgoDH16SHA512, kexAlgoDH14SHA1, + kexAlgoDH1SHA1, +} + +// serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden +// for the server half. +var serverForbiddenKexAlgos = map[string]struct{}{ + kexAlgoDHGEXSHA1: {}, // server half implementation is only minimal to satisfy the automated tests + kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests +} + +// preferredKexAlgos specifies the default preference for key-exchange +// algorithms in preference order. The diffie-hellman-group16-sha512 algorithm +// is disabled by default because it is a bit slower than the others. +var preferredKexAlgos = []string{ + kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, + kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, + kexAlgoDH14SHA256, kexAlgoDH14SHA1, +} + +// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods +// of authenticating servers) in preference order. +var supportedHostKeyAlgos = []string{ + CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, + CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, + CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, + + KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, + KeyAlgoRSASHA256, KeyAlgoRSASHA512, + KeyAlgoRSA, KeyAlgoDSA, + + KeyAlgoED25519, +} + +// supportedMACs specifies a default set of MAC algorithms in preference order. 
+// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed +// because they have reached the end of their useful life. +var supportedMACs = []string{ + "hmac-sha2-256-etm@openssh.com", "hmac-sha2-512-etm@openssh.com", "hmac-sha2-256", "hmac-sha2-512", "hmac-sha1", "hmac-sha1-96", +} + +var supportedCompressions = []string{compressionNone} + +// hashFuncs keeps the mapping of supported signature algorithms to their +// respective hashes needed for signing and verification. +var hashFuncs = map[string]crypto.Hash{ + KeyAlgoRSA: crypto.SHA1, + KeyAlgoRSASHA256: crypto.SHA256, + KeyAlgoRSASHA512: crypto.SHA512, + KeyAlgoDSA: crypto.SHA1, + KeyAlgoECDSA256: crypto.SHA256, + KeyAlgoECDSA384: crypto.SHA384, + KeyAlgoECDSA521: crypto.SHA512, + // KeyAlgoED25519 doesn't pre-hash. + KeyAlgoSKECDSA256: crypto.SHA256, + KeyAlgoSKED25519: crypto.SHA256, +} + +// algorithmsForKeyFormat returns the supported signature algorithms for a given +// public key format (PublicKey.Type), in order of preference. See RFC 8332, +// Section 2. See also the note in sendKexInit on backwards compatibility. +func algorithmsForKeyFormat(keyFormat string) []string { + switch keyFormat { + case KeyAlgoRSA: + return []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA} + case CertAlgoRSAv01: + return []string{CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, CertAlgoRSAv01} + default: + return []string{keyFormat} + } +} + +// isRSA returns whether algo is a supported RSA algorithm, including certificate +// algorithms. +func isRSA(algo string) bool { + algos := algorithmsForKeyFormat(KeyAlgoRSA) + return contains(algos, underlyingAlgo(algo)) +} + +func isRSACert(algo string) bool { + _, ok := certKeyAlgoNames[algo] + if !ok { + return false + } + return isRSA(algo) +} + +// supportedPubKeyAuthAlgos specifies the supported client public key +// authentication algorithms. Note that this doesn't include certificate types +// since those use the underlying algorithm. This list is sent to the client if +// it supports the server-sig-algs extension. Order is irrelevant. +var supportedPubKeyAuthAlgos = []string{ + KeyAlgoED25519, + KeyAlgoSKED25519, KeyAlgoSKECDSA256, + KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, + KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA, + KeyAlgoDSA, +} + +// unexpectedMessageError results when the SSH message that we received didn't +// match what we wanted. +func unexpectedMessageError(expected, got uint8) error { + return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected) +} + +// parseError results from a malformed SSH message. +func parseError(tag uint8) error { + return fmt.Errorf("ssh: parse error in message type %d", tag) +} + +func findCommon(what string, client []string, server []string) (common string, err error) { + for _, c := range client { + for _, s := range server { + if c == s { + return c, nil + } + } + } + return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) +} + +// directionAlgorithms records algorithm choices in one direction (either read or write) +type directionAlgorithms struct { + Cipher string + MAC string + Compression string +} + +// rekeyBytes returns a rekeying intervals in bytes. +func (a *directionAlgorithms) rekeyBytes() int64 { + // According to RFC 4344 block ciphers should rekey after + // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is + // 128. 
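+	// Worked out for AES (a sketch of the arithmetic, not upstream text):
+	// BLOCKSIZE = 128 bits gives 2^(128/4) = 2^32 blocks of 16 bytes each,
+	// i.e. 16 * 2^32 = 2^36 bytes (64 GiB), which is the value returned
+	// below.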
+	switch a.Cipher {
+	case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcm128CipherID, gcm256CipherID, aes128cbcID:
+		return 16 * (1 << 32)
+	}
+
+	// For others, stick with RFC 4253 recommendation to rekey after 1 GB of data.
+	return 1 << 30
+}
+
+var aeadCiphers = map[string]bool{
+	gcm128CipherID:     true,
+	gcm256CipherID:     true,
+	chacha20Poly1305ID: true,
+}
+
+type algorithms struct {
+	kex     string
+	hostKey string
+	w       directionAlgorithms
+	r       directionAlgorithms
+}
+
+func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) {
+	result := &algorithms{}
+
+	result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
+	if err != nil {
+		return
+	}
+
+	result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos)
+	if err != nil {
+		return
+	}
+
+	stoc, ctos := &result.w, &result.r
+	if isClient {
+		ctos, stoc = stoc, ctos
+	}
+
+	ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer)
+	if err != nil {
+		return
+	}
+
+	stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient)
+	if err != nil {
+		return
+	}
+
+	if !aeadCiphers[ctos.Cipher] {
+		ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
+		if err != nil {
+			return
+		}
+	}
+
+	if !aeadCiphers[stoc.Cipher] {
+		stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
+		if err != nil {
+			return
+		}
+	}
+
+	ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
+	if err != nil {
+		return
+	}
+
+	stoc.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient)
+	if err != nil {
+		return
+	}
+
+	return result, nil
+}
+
+// If RekeyThreshold is too small, we can't make any progress sending
+// data.
+const minRekeyThreshold uint64 = 256
+
+// Config contains configuration data common to both ServerConfig and
+// ClientConfig.
+type Config struct {
+	// Rand provides the source of entropy for cryptographic
+	// primitives. If Rand is nil, the cryptographic random reader
+	// in package crypto/rand will be used.
+	Rand io.Reader
+
+	// The maximum number of bytes sent or received after which a
+	// new key is negotiated. It must be at least 256. If
+	// unspecified, a size suitable for the chosen cipher is used.
+	RekeyThreshold uint64
+
+	// The allowed key exchange algorithms. If unspecified then a default set
+	// of algorithms is used. Unsupported values are silently ignored.
+	KeyExchanges []string
+
+	// The allowed cipher algorithms. If unspecified then a sensible default is
+	// used. Unsupported values are silently ignored.
+	Ciphers []string
+
+	// The allowed MAC algorithms. If unspecified then a sensible default is
+	// used. Unsupported values are silently ignored.
+	MACs []string
+}
+
+// SetDefaults sets sensible values for unset fields in config. This is
+// exported for testing: Configs passed to SSH functions are copied and have
+// default values set automatically.
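+//
+// A minimal usage sketch (illustrative, not upstream documentation): a
+// zero-value Config is filled in place, after which the algorithm lists
+// are non-nil:
+//
+//	var cfg Config
+//	cfg.SetDefaults()
+//	// cfg.Ciphers, cfg.KeyExchanges and cfg.MACs now hold the defaults,
+//	// and cfg.Rand is crypto/rand.Reader.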
+func (c *Config) SetDefaults() { + if c.Rand == nil { + c.Rand = rand.Reader + } + if c.Ciphers == nil { + c.Ciphers = preferredCiphers + } + var ciphers []string + for _, c := range c.Ciphers { + if cipherModes[c] != nil { + // Ignore the cipher if we have no cipherModes definition. + ciphers = append(ciphers, c) + } + } + c.Ciphers = ciphers + + if c.KeyExchanges == nil { + c.KeyExchanges = preferredKexAlgos + } + var kexs []string + for _, k := range c.KeyExchanges { + if kexAlgoMap[k] != nil { + // Ignore the KEX if we have no kexAlgoMap definition. + kexs = append(kexs, k) + } + } + c.KeyExchanges = kexs + + if c.MACs == nil { + c.MACs = supportedMACs + } + var macs []string + for _, m := range c.MACs { + if macModes[m] != nil { + // Ignore the MAC if we have no macModes definition. + macs = append(macs, m) + } + } + c.MACs = macs + + if c.RekeyThreshold == 0 { + // cipher specific default + } else if c.RekeyThreshold < minRekeyThreshold { + c.RekeyThreshold = minRekeyThreshold + } else if c.RekeyThreshold >= math.MaxInt64 { + // Avoid weirdness if somebody uses -1 as a threshold. + c.RekeyThreshold = math.MaxInt64 + } +} + +// buildDataSignedForAuth returns the data that is signed in order to prove +// possession of a private key. See RFC 4252, section 7. algo is the advertised +// algorithm, and may be a certificate type. +func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo string, pubKey []byte) []byte { + data := struct { + Session []byte + Type byte + User string + Service string + Method string + Sign bool + Algo string + PubKey []byte + }{ + sessionID, + msgUserAuthRequest, + req.User, + req.Service, + req.Method, + true, + algo, + pubKey, + } + return Marshal(data) +} + +func appendU16(buf []byte, n uint16) []byte { + return append(buf, byte(n>>8), byte(n)) +} + +func appendU32(buf []byte, n uint32) []byte { + return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) +} + +func appendU64(buf []byte, n uint64) []byte { + return append(buf, + byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), + byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) +} + +func appendInt(buf []byte, n int) []byte { + return appendU32(buf, uint32(n)) +} + +func appendString(buf []byte, s string) []byte { + buf = appendU32(buf, uint32(len(s))) + buf = append(buf, s...) + return buf +} + +func appendBool(buf []byte, b bool) []byte { + if b { + return append(buf, 1) + } + return append(buf, 0) +} + +// newCond is a helper to hide the fact that there is no usable zero +// value for sync.Cond. +func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } + +// window represents the buffer available to clients +// wishing to write to a channel. +type window struct { + *sync.Cond + win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 + writeWaiters int + closed bool +} + +// add adds win to the amount of window available +// for consumers. +func (w *window) add(win uint32) bool { + // a zero sized window adjust is a noop. + if win == 0 { + return true + } + w.L.Lock() + if w.win+win < win { + w.L.Unlock() + return false + } + w.win += win + // It is unusual that multiple goroutines would be attempting to reserve + // window space, but not guaranteed. Use broadcast to notify all waiters + // that additional window is available. + w.Broadcast() + w.L.Unlock() + return true +} + +// close sets the window to closed, so all reservations fail +// immediately. 
+func (w *window) close() { + w.L.Lock() + w.closed = true + w.Broadcast() + w.L.Unlock() +} + +// reserve reserves win from the available window capacity. +// If no capacity remains, reserve will block. reserve may +// return less than requested. +func (w *window) reserve(win uint32) (uint32, error) { + var err error + w.L.Lock() + w.writeWaiters++ + w.Broadcast() + for w.win == 0 && !w.closed { + w.Wait() + } + w.writeWaiters-- + if w.win < win { + win = w.win + } + w.win -= win + if w.closed { + err = io.EOF + } + w.L.Unlock() + return win, err +} + +// waitWriterBlocked waits until some goroutine is blocked for further +// writes. It is used in tests only. +func (w *window) waitWriterBlocked() { + w.Cond.L.Lock() + for w.writeWaiters == 0 { + w.Cond.Wait() + } + w.Cond.L.Unlock() +} diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go new file mode 100644 index 000000000..8f345ee92 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/connection.go @@ -0,0 +1,143 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "fmt" + "net" +) + +// OpenChannelError is returned if the other side rejects an +// OpenChannel request. +type OpenChannelError struct { + Reason RejectionReason + Message string +} + +func (e *OpenChannelError) Error() string { + return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message) +} + +// ConnMetadata holds metadata for the connection. +type ConnMetadata interface { + // User returns the user ID for this connection. + User() string + + // SessionID returns the session hash, also denoted by H. + SessionID() []byte + + // ClientVersion returns the client's version string as hashed + // into the session ID. + ClientVersion() []byte + + // ServerVersion returns the server's version string as hashed + // into the session ID. + ServerVersion() []byte + + // RemoteAddr returns the remote address for this connection. + RemoteAddr() net.Addr + + // LocalAddr returns the local address for this connection. + LocalAddr() net.Addr +} + +// Conn represents an SSH connection for both server and client roles. +// Conn is the basis for implementing an application layer, such +// as ClientConn, which implements the traditional shell access for +// clients. +type Conn interface { + ConnMetadata + + // SendRequest sends a global request, and returns the + // reply. If wantReply is true, it returns the response status + // and payload. See also RFC 4254, section 4. + SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) + + // OpenChannel tries to open an channel. If the request is + // rejected, it returns *OpenChannelError. On success it returns + // the SSH Channel and a Go channel for incoming, out-of-band + // requests. The Go channel must be serviced, or the + // connection will hang. + OpenChannel(name string, data []byte) (Channel, <-chan *Request, error) + + // Close closes the underlying network connection + Close() error + + // Wait blocks until the connection has shut down, and returns the + // error causing the shutdown. + Wait() error + + // TODO(hanwen): consider exposing: + // RequestKeyChange + // Disconnect +} + +// DiscardRequests consumes and rejects all requests from the +// passed-in channel. 
+func DiscardRequests(in <-chan *Request) { + for req := range in { + if req.WantReply { + req.Reply(false, nil) + } + } +} + +// A connection represents an incoming connection. +type connection struct { + transport *handshakeTransport + sshConn + + // The connection protocol. + *mux +} + +func (c *connection) Close() error { + return c.sshConn.conn.Close() +} + +// sshConn provides net.Conn metadata, but disallows direct reads and +// writes. +type sshConn struct { + conn net.Conn + + user string + sessionID []byte + clientVersion []byte + serverVersion []byte +} + +func dup(src []byte) []byte { + dst := make([]byte, len(src)) + copy(dst, src) + return dst +} + +func (c *sshConn) User() string { + return c.user +} + +func (c *sshConn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *sshConn) Close() error { + return c.conn.Close() +} + +func (c *sshConn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +func (c *sshConn) SessionID() []byte { + return dup(c.sessionID) +} + +func (c *sshConn) ClientVersion() []byte { + return dup(c.clientVersion) +} + +func (c *sshConn) ServerVersion() []byte { + return dup(c.serverVersion) +} diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go new file mode 100644 index 000000000..f5d352fe3 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/doc.go @@ -0,0 +1,23 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package ssh implements an SSH client and server. + +SSH is a transport security protocol, an authentication protocol and a +family of application protocols. The most typical application level +protocol is a remote shell and this is specifically implemented. However, +the multiplexed nature of SSH is exposed to users that wish to support +others. + +References: + + [PROTOCOL]: https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=HEAD + [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD + [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 + +This package does not fall under the stability promise of the Go language itself, +so its API may be changed when pressing needs arise. +*/ +package ssh diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go new file mode 100644 index 000000000..56cdc7c21 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -0,0 +1,806 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto/rand" + "errors" + "fmt" + "io" + "log" + "net" + "strings" + "sync" +) + +// debugHandshake, if set, prints messages sent and received. Key +// exchange messages are printed as if DH were used, so the debug +// messages are wrong when using ECDH. +const debugHandshake = false + +// chanSize sets the amount of buffering SSH connections. This is +// primarily for testing: setting chanSize=0 uncovers deadlocks more +// quickly. +const chanSize = 16 + +// keyingTransport is a packet based transport that supports key +// changes. It need not be thread-safe. It should pass through +// msgNewKeys in both directions. +type keyingTransport interface { + packetConn + + // prepareKeyChange sets up a key change. 
The key change for a + // direction will be effected if a msgNewKeys message is sent + // or received. + prepareKeyChange(*algorithms, *kexResult) error + + // setStrictMode sets the strict KEX mode, notably triggering + // sequence number resets on sending or receiving msgNewKeys. + // If the sequence number is already > 1 when setStrictMode + // is called, an error is returned. + setStrictMode() error + + // setInitialKEXDone indicates to the transport that the initial key exchange + // was completed + setInitialKEXDone() +} + +// handshakeTransport implements rekeying on top of a keyingTransport +// and offers a thread-safe writePacket() interface. +type handshakeTransport struct { + conn keyingTransport + config *Config + + serverVersion []byte + clientVersion []byte + + // hostKeys is non-empty if we are the server. In that case, + // it contains all host keys that can be used to sign the + // connection. + hostKeys []Signer + + // publicKeyAuthAlgorithms is non-empty if we are the server. In that case, + // it contains the supported client public key authentication algorithms. + publicKeyAuthAlgorithms []string + + // hostKeyAlgorithms is non-empty if we are the client. In that case, + // we accept these key types from the server as host key. + hostKeyAlgorithms []string + + // On read error, incoming is closed, and readError is set. + incoming chan []byte + readError error + + mu sync.Mutex + writeError error + sentInitPacket []byte + sentInitMsg *kexInitMsg + pendingPackets [][]byte // Used when a key exchange is in progress. + writePacketsLeft uint32 + writeBytesLeft int64 + + // If the read loop wants to schedule a kex, it pings this + // channel, and the write loop will send out a kex + // message. + requestKex chan struct{} + + // If the other side requests or confirms a kex, its kexInit + // packet is sent here for the write loop to find it. + startKex chan *pendingKex + kexLoopDone chan struct{} // closed (with writeError non-nil) when kexLoop exits + + // data for host key checking + hostKeyCallback HostKeyCallback + dialAddress string + remoteAddr net.Addr + + // bannerCallback is non-empty if we are the client and it has been set in + // ClientConfig. In that case it is called during the user authentication + // dance to handle a custom server's message. + bannerCallback BannerCallback + + // Algorithms agreed in the last key exchange. + algorithms *algorithms + + // Counters exclusively owned by readLoop. + readPacketsLeft uint32 + readBytesLeft int64 + + // The session ID or nil if first kex did not complete yet. + sessionID []byte + + // strictMode indicates if the other side of the handshake indicated + // that we should be following the strict KEX protocol restrictions. + strictMode bool +} + +type pendingKex struct { + otherInit []byte + done chan error +} + +func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { + t := &handshakeTransport{ + conn: conn, + serverVersion: serverVersion, + clientVersion: clientVersion, + incoming: make(chan []byte, chanSize), + requestKex: make(chan struct{}, 1), + startKex: make(chan *pendingKex), + kexLoopDone: make(chan struct{}), + + config: config, + } + t.resetReadThresholds() + t.resetWriteThresholds() + + // We always start with a mandatory key exchange. 
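+	// requestKex was created above with a buffer of one, so this initial
+	// send cannot block; it simply queues the mandatory first key exchange
+	// for kexLoop to service once the read and kex loops are started.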
+ t.requestKex <- struct{}{} + return t +} + +func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.dialAddress = dialAddr + t.remoteAddr = addr + t.hostKeyCallback = config.HostKeyCallback + t.bannerCallback = config.BannerCallback + if config.HostKeyAlgorithms != nil { + t.hostKeyAlgorithms = config.HostKeyAlgorithms + } else { + t.hostKeyAlgorithms = supportedHostKeyAlgos + } + go t.readLoop() + go t.kexLoop() + return t +} + +func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.hostKeys = config.hostKeys + t.publicKeyAuthAlgorithms = config.PublicKeyAuthAlgorithms + go t.readLoop() + go t.kexLoop() + return t +} + +func (t *handshakeTransport) getSessionID() []byte { + return t.sessionID +} + +// waitSession waits for the session to be established. This should be +// the first thing to call after instantiating handshakeTransport. +func (t *handshakeTransport) waitSession() error { + p, err := t.readPacket() + if err != nil { + return err + } + if p[0] != msgNewKeys { + return fmt.Errorf("ssh: first packet should be msgNewKeys") + } + + return nil +} + +func (t *handshakeTransport) id() string { + if len(t.hostKeys) > 0 { + return "server" + } + return "client" +} + +func (t *handshakeTransport) printPacket(p []byte, write bool) { + action := "got" + if write { + action = "sent" + } + + if p[0] == msgChannelData || p[0] == msgChannelExtendedData { + log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) + } else { + msg, err := decode(p) + log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) + } +} + +func (t *handshakeTransport) readPacket() ([]byte, error) { + p, ok := <-t.incoming + if !ok { + return nil, t.readError + } + return p, nil +} + +func (t *handshakeTransport) readLoop() { + first := true + for { + p, err := t.readOnePacket(first) + first = false + if err != nil { + t.readError = err + close(t.incoming) + break + } + // If this is the first kex, and strict KEX mode is enabled, + // we don't ignore any messages, as they may be used to manipulate + // the packet sequence numbers. + if !(t.sessionID == nil && t.strictMode) && (p[0] == msgIgnore || p[0] == msgDebug) { + continue + } + t.incoming <- p + } + + // Stop writers too. + t.recordWriteError(t.readError) + + // Unblock the writer should it wait for this. + close(t.startKex) + + // Don't close t.requestKex; it's also written to from writePacket. +} + +func (t *handshakeTransport) pushPacket(p []byte) error { + if debugHandshake { + t.printPacket(p, true) + } + return t.conn.writePacket(p) +} + +func (t *handshakeTransport) getWriteError() error { + t.mu.Lock() + defer t.mu.Unlock() + return t.writeError +} + +func (t *handshakeTransport) recordWriteError(err error) { + t.mu.Lock() + defer t.mu.Unlock() + if t.writeError == nil && err != nil { + t.writeError = err + } +} + +func (t *handshakeTransport) requestKeyExchange() { + select { + case t.requestKex <- struct{}{}: + default: + // something already requested a kex, so do nothing. 
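+		// Because the channel is buffered with capacity one, concurrent
+		// callers coalesce into a single pending request, which is all
+		// kexLoop needs in order to schedule one more key exchange.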
+ } +} + +func (t *handshakeTransport) resetWriteThresholds() { + t.writePacketsLeft = packetRekeyThreshold + if t.config.RekeyThreshold > 0 { + t.writeBytesLeft = int64(t.config.RekeyThreshold) + } else if t.algorithms != nil { + t.writeBytesLeft = t.algorithms.w.rekeyBytes() + } else { + t.writeBytesLeft = 1 << 30 + } +} + +func (t *handshakeTransport) kexLoop() { + +write: + for t.getWriteError() == nil { + var request *pendingKex + var sent bool + + for request == nil || !sent { + var ok bool + select { + case request, ok = <-t.startKex: + if !ok { + break write + } + case <-t.requestKex: + break + } + + if !sent { + if err := t.sendKexInit(); err != nil { + t.recordWriteError(err) + break + } + sent = true + } + } + + if err := t.getWriteError(); err != nil { + if request != nil { + request.done <- err + } + break + } + + // We're not servicing t.requestKex, but that is OK: + // we never block on sending to t.requestKex. + + // We're not servicing t.startKex, but the remote end + // has just sent us a kexInitMsg, so it can't send + // another key change request, until we close the done + // channel on the pendingKex request. + + err := t.enterKeyExchange(request.otherInit) + + t.mu.Lock() + t.writeError = err + t.sentInitPacket = nil + t.sentInitMsg = nil + + t.resetWriteThresholds() + + // we have completed the key exchange. Since the + // reader is still blocked, it is safe to clear out + // the requestKex channel. This avoids the situation + // where: 1) we consumed our own request for the + // initial kex, and 2) the kex from the remote side + // caused another send on the requestKex channel, + clear: + for { + select { + case <-t.requestKex: + // + default: + break clear + } + } + + request.done <- t.writeError + + // kex finished. Push packets that we received while + // the kex was in progress. Don't look at t.startKex + // and don't increment writtenSinceKex: if we trigger + // another kex while we are still busy with the last + // one, things will become very confusing. + for _, p := range t.pendingPackets { + t.writeError = t.pushPacket(p) + if t.writeError != nil { + break + } + } + t.pendingPackets = t.pendingPackets[:0] + t.mu.Unlock() + } + + // Unblock reader. + t.conn.Close() + + // drain startKex channel. We don't service t.requestKex + // because nobody does blocking sends there. + for request := range t.startKex { + request.done <- t.getWriteError() + } + + // Mark that the loop is done so that Close can return. + close(t.kexLoopDone) +} + +// The protocol uses uint32 for packet counters, so we can't let them +// reach 1<<32. We will actually read and write more packets than +// this, though: the other side may send more packets, and after we +// hit this limit on writing we will send a few more packets for the +// key exchange itself. 
+const packetRekeyThreshold = (1 << 31) + +func (t *handshakeTransport) resetReadThresholds() { + t.readPacketsLeft = packetRekeyThreshold + if t.config.RekeyThreshold > 0 { + t.readBytesLeft = int64(t.config.RekeyThreshold) + } else if t.algorithms != nil { + t.readBytesLeft = t.algorithms.r.rekeyBytes() + } else { + t.readBytesLeft = 1 << 30 + } +} + +func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { + p, err := t.conn.readPacket() + if err != nil { + return nil, err + } + + if t.readPacketsLeft > 0 { + t.readPacketsLeft-- + } else { + t.requestKeyExchange() + } + + if t.readBytesLeft > 0 { + t.readBytesLeft -= int64(len(p)) + } else { + t.requestKeyExchange() + } + + if debugHandshake { + t.printPacket(p, false) + } + + if first && p[0] != msgKexInit { + return nil, fmt.Errorf("ssh: first packet should be msgKexInit") + } + + if p[0] != msgKexInit { + return p, nil + } + + firstKex := t.sessionID == nil + + kex := pendingKex{ + done: make(chan error, 1), + otherInit: p, + } + t.startKex <- &kex + err = <-kex.done + + if debugHandshake { + log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) + } + + if err != nil { + return nil, err + } + + t.resetReadThresholds() + + // By default, a key exchange is hidden from higher layers by + // translating it into msgIgnore. + successPacket := []byte{msgIgnore} + if firstKex { + // sendKexInit() for the first kex waits for + // msgNewKeys so the authentication process is + // guaranteed to happen over an encrypted transport. + successPacket = []byte{msgNewKeys} + } + + return successPacket, nil +} + +const ( + kexStrictClient = "kex-strict-c-v00@openssh.com" + kexStrictServer = "kex-strict-s-v00@openssh.com" +) + +// sendKexInit sends a key change message. +func (t *handshakeTransport) sendKexInit() error { + t.mu.Lock() + defer t.mu.Unlock() + if t.sentInitMsg != nil { + // kexInits may be sent either in response to the other side, + // or because our side wants to initiate a key change, so we + // may have already sent a kexInit. In that case, don't send a + // second kexInit. + return nil + } + + msg := &kexInitMsg{ + CiphersClientServer: t.config.Ciphers, + CiphersServerClient: t.config.Ciphers, + MACsClientServer: t.config.MACs, + MACsServerClient: t.config.MACs, + CompressionClientServer: supportedCompressions, + CompressionServerClient: supportedCompressions, + } + io.ReadFull(rand.Reader, msg.Cookie[:]) + + // We mutate the KexAlgos slice, in order to add the kex-strict extension algorithm, + // and possibly to add the ext-info extension algorithm. Since the slice may be the + // user owned KeyExchanges, we create our own slice in order to avoid using user + // owned memory by mistake. + msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+2) // room for kex-strict and ext-info + msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) + + isServer := len(t.hostKeys) > 0 + if isServer { + for _, k := range t.hostKeys { + // If k is a MultiAlgorithmSigner, we restrict the signature + // algorithms. If k is a AlgorithmSigner, presume it supports all + // signature algorithms associated with the key format. If k is not + // an AlgorithmSigner, we can only assume it only supports the + // algorithms that matches the key format. (This means that Sign + // can't pick a different default). 
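+			// For illustration (hypothetical key, not upstream code): an RSA
+			// host key wrapped in a MultiAlgorithmSigner whose Algorithms()
+			// returns only rsa-sha2-256 is advertised solely as rsa-sha2-256,
+			// while the same key as a plain AlgorithmSigner is advertised with
+			// every algorithm returned by algorithmsForKeyFormat("ssh-rsa").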
+ keyFormat := k.PublicKey().Type() + + switch s := k.(type) { + case MultiAlgorithmSigner: + for _, algo := range algorithmsForKeyFormat(keyFormat) { + if contains(s.Algorithms(), underlyingAlgo(algo)) { + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo) + } + } + case AlgorithmSigner: + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algorithmsForKeyFormat(keyFormat)...) + default: + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat) + } + } + + if t.sessionID == nil { + msg.KexAlgos = append(msg.KexAlgos, kexStrictServer) + } + } else { + msg.ServerHostKeyAlgos = t.hostKeyAlgorithms + + // As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what + // algorithms the server supports for public key authentication. See RFC + // 8308, Section 2.1. + // + // We also send the strict KEX mode extension algorithm, in order to opt + // into the strict KEX mode. + if firstKeyExchange := t.sessionID == nil; firstKeyExchange { + msg.KexAlgos = append(msg.KexAlgos, "ext-info-c") + msg.KexAlgos = append(msg.KexAlgos, kexStrictClient) + } + + } + + packet := Marshal(msg) + + // writePacket destroys the contents, so save a copy. + packetCopy := make([]byte, len(packet)) + copy(packetCopy, packet) + + if err := t.pushPacket(packetCopy); err != nil { + return err + } + + t.sentInitMsg = msg + t.sentInitPacket = packet + + return nil +} + +func (t *handshakeTransport) writePacket(p []byte) error { + switch p[0] { + case msgKexInit: + return errors.New("ssh: only handshakeTransport can send kexInit") + case msgNewKeys: + return errors.New("ssh: only handshakeTransport can send newKeys") + } + + t.mu.Lock() + defer t.mu.Unlock() + if t.writeError != nil { + return t.writeError + } + + if t.sentInitMsg != nil { + // Copy the packet so the writer can reuse the buffer. + cp := make([]byte, len(p)) + copy(cp, p) + t.pendingPackets = append(t.pendingPackets, cp) + return nil + } + + if t.writeBytesLeft > 0 { + t.writeBytesLeft -= int64(len(p)) + } else { + t.requestKeyExchange() + } + + if t.writePacketsLeft > 0 { + t.writePacketsLeft-- + } else { + t.requestKeyExchange() + } + + if err := t.pushPacket(p); err != nil { + t.writeError = err + } + + return nil +} + +func (t *handshakeTransport) Close() error { + // Close the connection. This should cause the readLoop goroutine to wake up + // and close t.startKex, which will shut down kexLoop if running. + err := t.conn.Close() + + // Wait for the kexLoop goroutine to complete. + // At that point we know that the readLoop goroutine is complete too, + // because kexLoop itself waits for readLoop to close the startKex channel. 
+ <-t.kexLoopDone + + return err +} + +func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { + if debugHandshake { + log.Printf("%s entered key exchange", t.id()) + } + + otherInit := &kexInitMsg{} + if err := Unmarshal(otherInitPacket, otherInit); err != nil { + return err + } + + magics := handshakeMagics{ + clientVersion: t.clientVersion, + serverVersion: t.serverVersion, + clientKexInit: otherInitPacket, + serverKexInit: t.sentInitPacket, + } + + clientInit := otherInit + serverInit := t.sentInitMsg + isClient := len(t.hostKeys) == 0 + if isClient { + clientInit, serverInit = serverInit, clientInit + + magics.clientKexInit = t.sentInitPacket + magics.serverKexInit = otherInitPacket + } + + var err error + t.algorithms, err = findAgreedAlgorithms(isClient, clientInit, serverInit) + if err != nil { + return err + } + + if t.sessionID == nil && ((isClient && contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && contains(clientInit.KexAlgos, kexStrictClient))) { + t.strictMode = true + if err := t.conn.setStrictMode(); err != nil { + return err + } + } + + // We don't send FirstKexFollows, but we handle receiving it. + // + // RFC 4253 section 7 defines the kex and the agreement method for + // first_kex_packet_follows. It states that the guessed packet + // should be ignored if the "kex algorithm and/or the host + // key algorithm is guessed wrong (server and client have + // different preferred algorithm), or if any of the other + // algorithms cannot be agreed upon". The other algorithms have + // already been checked above so the kex algorithm and host key + // algorithm are checked here. + if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { + // other side sent a kex message for the wrong algorithm, + // which we have to ignore. + if _, err := t.conn.readPacket(); err != nil { + return err + } + } + + kex, ok := kexAlgoMap[t.algorithms.kex] + if !ok { + return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) + } + + var result *kexResult + if len(t.hostKeys) > 0 { + result, err = t.server(kex, &magics) + } else { + result, err = t.client(kex, &magics) + } + + if err != nil { + return err + } + + firstKeyExchange := t.sessionID == nil + if firstKeyExchange { + t.sessionID = result.H + } + result.SessionID = t.sessionID + + if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil { + return err + } + if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { + return err + } + + // On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO + // message with the server-sig-algs extension if the client supports it. See + // RFC 8308, Sections 2.4 and 3.1, and [PROTOCOL], Section 1.9. + if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") { + supportedPubKeyAuthAlgosList := strings.Join(t.publicKeyAuthAlgorithms, ",") + extInfo := &extInfoMsg{ + NumExtensions: 2, + Payload: make([]byte, 0, 4+15+4+len(supportedPubKeyAuthAlgosList)+4+16+4+1), + } + extInfo.Payload = appendInt(extInfo.Payload, len("server-sig-algs")) + extInfo.Payload = append(extInfo.Payload, "server-sig-algs"...) + extInfo.Payload = appendInt(extInfo.Payload, len(supportedPubKeyAuthAlgosList)) + extInfo.Payload = append(extInfo.Payload, supportedPubKeyAuthAlgosList...) 
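+		// Each extension is encoded as two SSH strings (uint32 length prefix
+		// plus bytes): name, then value. The Payload capacity above
+		// pre-computes exactly that: 4+15 for "server-sig-algs", 4+len(list)
+		// for its value, then 4+16 and 4+1 for the "ping@openssh.com"
+		// extension appended below.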
+ extInfo.Payload = appendInt(extInfo.Payload, len("ping@openssh.com")) + extInfo.Payload = append(extInfo.Payload, "ping@openssh.com"...) + extInfo.Payload = appendInt(extInfo.Payload, 1) + extInfo.Payload = append(extInfo.Payload, "0"...) + if err := t.conn.writePacket(Marshal(extInfo)); err != nil { + return err + } + } + + if packet, err := t.conn.readPacket(); err != nil { + return err + } else if packet[0] != msgNewKeys { + return unexpectedMessageError(msgNewKeys, packet[0]) + } + + if firstKeyExchange { + // Indicates to the transport that the first key exchange is completed + // after receiving SSH_MSG_NEWKEYS. + t.conn.setInitialKEXDone() + } + + return nil +} + +// algorithmSignerWrapper is an AlgorithmSigner that only supports the default +// key format algorithm. +// +// This is technically a violation of the AlgorithmSigner interface, but it +// should be unreachable given where we use this. Anyway, at least it returns an +// error instead of panicing or producing an incorrect signature. +type algorithmSignerWrapper struct { + Signer +} + +func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + if algorithm != underlyingAlgo(a.PublicKey().Type()) { + return nil, errors.New("ssh: internal error: algorithmSignerWrapper invoked with non-default algorithm") + } + return a.Sign(rand, data) +} + +func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner { + for _, k := range hostKeys { + if s, ok := k.(MultiAlgorithmSigner); ok { + if !contains(s.Algorithms(), underlyingAlgo(algo)) { + continue + } + } + + if algo == k.PublicKey().Type() { + return algorithmSignerWrapper{k} + } + + k, ok := k.(AlgorithmSigner) + if !ok { + continue + } + for _, a := range algorithmsForKeyFormat(k.PublicKey().Type()) { + if algo == a { + return k + } + } + } + return nil +} + +func (t *handshakeTransport) server(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { + hostKey := pickHostKey(t.hostKeys, t.algorithms.hostKey) + if hostKey == nil { + return nil, errors.New("ssh: internal error: negotiated unsupported signature type") + } + + r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey, t.algorithms.hostKey) + return r, err +} + +func (t *handshakeTransport) client(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { + result, err := kex.Client(t.conn, t.config.Rand, magics) + if err != nil { + return nil, err + } + + hostKey, err := ParsePublicKey(result.HostKey) + if err != nil { + return nil, err + } + + if err := verifyHostKeySignature(hostKey, t.algorithms.hostKey, result); err != nil { + return nil, err + } + + err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey) + if err != nil { + return nil, err + } + + return result, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go b/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go new file mode 100644 index 000000000..af81d2665 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go @@ -0,0 +1,93 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt_pbkdf implements bcrypt_pbkdf(3) from OpenBSD. +// +// See https://flak.tedunangst.com/post/bcrypt-pbkdf and +// https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/lib/libutil/bcrypt_pbkdf.c. 
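+//
+// Unlike PBKDF2 from RFC 2898, the output blocks are striped across the
+// derived key (see the interleaved indexing in Key), so a truncated key
+// still depends on every computed block.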
+package bcrypt_pbkdf + +import ( + "crypto/sha512" + "errors" + "golang.org/x/crypto/blowfish" +) + +const blockSize = 32 + +// Key derives a key from the password, salt and rounds count, returning a +// []byte of length keyLen that can be used as cryptographic key. +func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) { + if rounds < 1 { + return nil, errors.New("bcrypt_pbkdf: number of rounds is too small") + } + if len(password) == 0 { + return nil, errors.New("bcrypt_pbkdf: empty password") + } + if len(salt) == 0 || len(salt) > 1<<20 { + return nil, errors.New("bcrypt_pbkdf: bad salt length") + } + if keyLen > 1024 { + return nil, errors.New("bcrypt_pbkdf: keyLen is too large") + } + + numBlocks := (keyLen + blockSize - 1) / blockSize + key := make([]byte, numBlocks*blockSize) + + h := sha512.New() + h.Write(password) + shapass := h.Sum(nil) + + shasalt := make([]byte, 0, sha512.Size) + cnt, tmp := make([]byte, 4), make([]byte, blockSize) + for block := 1; block <= numBlocks; block++ { + h.Reset() + h.Write(salt) + cnt[0] = byte(block >> 24) + cnt[1] = byte(block >> 16) + cnt[2] = byte(block >> 8) + cnt[3] = byte(block) + h.Write(cnt) + bcryptHash(tmp, shapass, h.Sum(shasalt)) + + out := make([]byte, blockSize) + copy(out, tmp) + for i := 2; i <= rounds; i++ { + h.Reset() + h.Write(tmp) + bcryptHash(tmp, shapass, h.Sum(shasalt)) + for j := 0; j < len(out); j++ { + out[j] ^= tmp[j] + } + } + + for i, v := range out { + key[i*numBlocks+(block-1)] = v + } + } + return key[:keyLen], nil +} + +var magic = []byte("OxychromaticBlowfishSwatDynamite") + +func bcryptHash(out, shapass, shasalt []byte) { + c, err := blowfish.NewSaltedCipher(shapass, shasalt) + if err != nil { + panic(err) + } + for i := 0; i < 64; i++ { + blowfish.ExpandKey(shasalt, c) + blowfish.ExpandKey(shapass, c) + } + copy(out, magic) + for i := 0; i < 32; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(out[i:i+8], out[i:i+8]) + } + } + // Swap bytes due to different endianness. + for i := 0; i < 32; i += 4 { + out[i+3], out[i+2], out[i+1], out[i] = out[i], out[i+1], out[i+2], out[i+3] + } +} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go new file mode 100644 index 000000000..8a05f7990 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -0,0 +1,786 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/subtle" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + + "golang.org/x/crypto/curve25519" +) + +const ( + kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" + kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" + kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256" + kexAlgoDH16SHA512 = "diffie-hellman-group16-sha512" + kexAlgoECDH256 = "ecdh-sha2-nistp256" + kexAlgoECDH384 = "ecdh-sha2-nistp384" + kexAlgoECDH521 = "ecdh-sha2-nistp521" + kexAlgoCurve25519SHA256LibSSH = "curve25519-sha256@libssh.org" + kexAlgoCurve25519SHA256 = "curve25519-sha256" + + // For the following kex only the client half contains a production + // ready implementation. The server half only consists of a minimal + // implementation to satisfy the automated tests. + kexAlgoDHGEXSHA1 = "diffie-hellman-group-exchange-sha1" + kexAlgoDHGEXSHA256 = "diffie-hellman-group-exchange-sha256" +) + +// kexResult captures the outcome of a key exchange. 
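+// Its H and K fields drive the key derivation of RFC 4253, section 7.2:
+// each transport key is computed as HASH(K || H || x || session_id) for a
+// distinct per-purpose letter x, with the Hash field supplying HASH.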
+type kexResult struct { + // Session hash. See also RFC 4253, section 8. + H []byte + + // Shared secret. See also RFC 4253, section 8. + K []byte + + // Host key as hashed into H. + HostKey []byte + + // Signature of H. + Signature []byte + + // A cryptographic hash function that matches the security + // level of the key exchange algorithm. It is used for + // calculating H, and for deriving keys from H and K. + Hash crypto.Hash + + // The session ID, which is the first H computed. This is used + // to derive key material inside the transport. + SessionID []byte +} + +// handshakeMagics contains data that is always included in the +// session hash. +type handshakeMagics struct { + clientVersion, serverVersion []byte + clientKexInit, serverKexInit []byte +} + +func (m *handshakeMagics) write(w io.Writer) { + writeString(w, m.clientVersion) + writeString(w, m.serverVersion) + writeString(w, m.clientKexInit) + writeString(w, m.serverKexInit) +} + +// kexAlgorithm abstracts different key exchange algorithms. +type kexAlgorithm interface { + // Server runs server-side key agreement, signing the result + // with a hostkey. algo is the negotiated algorithm, and may + // be a certificate type. + Server(p packetConn, rand io.Reader, magics *handshakeMagics, s AlgorithmSigner, algo string) (*kexResult, error) + + // Client runs the client-side key agreement. Caller is + // responsible for verifying the host key signature. + Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) +} + +// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. +type dhGroup struct { + g, p, pMinus1 *big.Int + hashFunc crypto.Hash +} + +func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { + if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") + } + return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil +} + +func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { + var x *big.Int + for { + var err error + if x, err = rand.Int(randSource, group.pMinus1); err != nil { + return nil, err + } + if x.Sign() > 0 { + break + } + } + + X := new(big.Int).Exp(group.g, x, group.p) + kexDHInit := kexDHInitMsg{ + X: X, + } + if err := c.writePacket(Marshal(&kexDHInit)); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexDHReply kexDHReplyMsg + if err = Unmarshal(packet, &kexDHReply); err != nil { + return nil, err + } + + ki, err := group.diffieHellman(kexDHReply.Y, x) + if err != nil { + return nil, err + } + + h := group.hashFunc.New() + magics.write(h) + writeString(h, kexDHReply.HostKey) + writeInt(h, X) + writeInt(h, kexDHReply.Y) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: kexDHReply.HostKey, + Signature: kexDHReply.Signature, + Hash: group.hashFunc, + }, nil +} + +func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return + } + var kexDHInit kexDHInitMsg + if err = Unmarshal(packet, &kexDHInit); err != nil { + return + } + + var y *big.Int + for { + if y, err = rand.Int(randSource, group.pMinus1); err != nil { + return + } + if y.Sign() > 0 { + break + } + } + + Y := 
new(big.Int).Exp(group.g, y, group.p) + ki, err := group.diffieHellman(kexDHInit.X, y) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := group.hashFunc.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeInt(h, kexDHInit.X) + writeInt(h, Y) + + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. + sig, err := signAndMarshal(priv, randSource, H, algo) + if err != nil { + return nil, err + } + + kexDHReply := kexDHReplyMsg{ + HostKey: hostKeyBytes, + Y: Y, + Signature: sig, + } + packet = Marshal(&kexDHReply) + + err = c.writePacket(packet) + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: group.hashFunc, + }, err +} + +// ecdh performs Elliptic Curve Diffie-Hellman key exchange as +// described in RFC 5656, section 4. +type ecdh struct { + curve elliptic.Curve +} + +func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + kexInit := kexECDHInitMsg{ + ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), + } + + serialized := Marshal(&kexInit) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + + x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) + if err != nil { + return nil, err + } + + // generate shared secret + secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kexInit.ClientPubKey) + writeString(h, reply.EphemeralPubKey) + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: ecHash(kex.curve), + }, nil +} + +// unmarshalECKey parses and checks an EC key. +func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { + x, y = elliptic.Unmarshal(curve, pubkey) + if x == nil { + return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") + } + if !validateECPublicKey(curve, x, y) { + return nil, nil, errors.New("ssh: public key not on curve") + } + return x, y, nil +} + +// validateECPublicKey checks that the point is a valid public key for +// the given curve. See [SEC1], 3.2.2 +func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { + if x.Sign() == 0 && y.Sign() == 0 { + return false + } + + if x.Cmp(curve.Params().P) >= 0 { + return false + } + + if y.Cmp(curve.Params().P) >= 0 { + return false + } + + if !curve.IsOnCurve(x, y) { + return false + } + + // We don't check if N * PubKey == 0, since + // + // - the NIST curves have cofactor = 1, so this is implicit. + // (We don't foresee an implementation that supports non NIST + // curves) + // + // - for ephemeral keys, we don't need to worry about small + // subgroup attacks. 
+ return true +} + +func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexECDHInit kexECDHInitMsg + if err = Unmarshal(packet, &kexECDHInit); err != nil { + return nil, err + } + + clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) + if err != nil { + return nil, err + } + + // We could cache this key across multiple users/multiple + // connection attempts, but the benefit is small. OpenSSH + // generates a new key for each incoming connection. + ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) + + // generate shared secret + secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexECDHInit.ClientPubKey) + writeString(h, serializedEphKey) + + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. + sig, err := signAndMarshal(priv, rand, H, algo) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: serializedEphKey, + HostKey: hostKeyBytes, + Signature: sig, + } + + serialized := Marshal(&reply) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + return &kexResult{ + H: H, + K: K, + HostKey: reply.HostKey, + Signature: sig, + Hash: ecHash(kex.curve), + }, nil +} + +// ecHash returns the hash to match the given elliptic curve, see RFC +// 5656, section 6.2.1 +func ecHash(curve elliptic.Curve) crypto.Hash { + bitSize := curve.Params().BitSize + switch { + case bitSize <= 256: + return crypto.SHA256 + case bitSize <= 384: + return crypto.SHA384 + } + return crypto.SHA512 +} + +var kexAlgoMap = map[string]kexAlgorithm{} + +func init() { + // This is the group called diffie-hellman-group1-sha1 in + // RFC 4253 and Oakley Group 2 in RFC 2409. + p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) + kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + hashFunc: crypto.SHA1, + } + + // This are the groups called diffie-hellman-group14-sha1 and + // diffie-hellman-group14-sha256 in RFC 4253 and RFC 8268, + // and Oakley Group 14 in RFC 3526. 
+ p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) + group14 := &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + } + + kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + g: group14.g, p: group14.p, pMinus1: group14.pMinus1, + hashFunc: crypto.SHA1, + } + kexAlgoMap[kexAlgoDH14SHA256] = &dhGroup{ + g: group14.g, p: group14.p, pMinus1: group14.pMinus1, + hashFunc: crypto.SHA256, + } + + // This is the group called diffie-hellman-group16-sha512 in RFC + // 8268 and Oakley Group 16 in RFC 3526. + p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF", 16) + + kexAlgoMap[kexAlgoDH16SHA512] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + hashFunc: crypto.SHA512, + } + + kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} + kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} + kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} + kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} + kexAlgoMap[kexAlgoCurve25519SHA256LibSSH] = &curve25519sha256{} + kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} + kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} +} + +// curve25519sha256 implements the curve25519-sha256 (formerly known as +// curve25519-sha256@libssh.org) key exchange method, as described in RFC 8731. +type curve25519sha256 struct{} + +type curve25519KeyPair struct { + priv [32]byte + pub [32]byte +} + +func (kp *curve25519KeyPair) generate(rand io.Reader) error { + if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { + return err + } + curve25519.ScalarBaseMult(&kp.pub, &kp.priv) + return nil +} + +// curve25519Zeros is just an array of 32 zero bytes so that we have something +// convenient to compare against in order to reject curve25519 points with the +// wrong order. 
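+// If a peer supplies a low-order point, the X25519 scalar multiplication
+// yields an all-zero shared secret; RFC 8731, section 3 requires aborting
+// the connection in that case, which the constant-time comparisons below
+// implement.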
+var curve25519Zeros [32]byte + +func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + if len(reply.EphemeralPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var servPub, secret [32]byte + copy(servPub[:], reply.EphemeralPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &servPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kp.pub[:]) + writeString(h, reply.EphemeralPubKey) + + ki := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: crypto.SHA256, + }, nil +} + +func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return + } + var kexInit kexECDHInitMsg + if err = Unmarshal(packet, &kexInit); err != nil { + return + } + + if len(kexInit.ClientPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + + var clientPub, secret [32]byte + copy(clientPub[:], kexInit.ClientPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &clientPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexInit.ClientPubKey) + writeString(h, kp.pub[:]) + + ki := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + H := h.Sum(nil) + + sig, err := signAndMarshal(priv, rand, H, algo) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: kp.pub[:], + HostKey: hostKeyBytes, + Signature: sig, + } + if err := c.writePacket(Marshal(&reply)); err != nil { + return nil, err + } + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: crypto.SHA256, + }, nil +} + +// dhGEXSHA implements the diffie-hellman-group-exchange-sha1 and +// diffie-hellman-group-exchange-sha256 key agreement protocols, +// as described in RFC 4419 +type dhGEXSHA struct { + hashFunc crypto.Hash +} + +const ( + dhGroupExchangeMinimumBits = 2048 + dhGroupExchangePreferredBits = 2048 + dhGroupExchangeMaximumBits = 8192 +) + +func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { + // Send GexRequest + kexDHGexRequest := kexDHGexRequestMsg{ + MinBits: dhGroupExchangeMinimumBits, + PreferedBits: dhGroupExchangePreferredBits, + MaxBits: dhGroupExchangeMaximumBits, + } + if err := 
c.writePacket(Marshal(&kexDHGexRequest)); err != nil { + return nil, err + } + + // Receive GexGroup + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var msg kexDHGexGroupMsg + if err = Unmarshal(packet, &msg); err != nil { + return nil, err + } + + // reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits + if msg.P.BitLen() < dhGroupExchangeMinimumBits || msg.P.BitLen() > dhGroupExchangeMaximumBits { + return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", msg.P.BitLen()) + } + + // Check if g is safe by verifying that 1 < g < p-1 + pMinusOne := new(big.Int).Sub(msg.P, bigOne) + if msg.G.Cmp(bigOne) <= 0 || msg.G.Cmp(pMinusOne) >= 0 { + return nil, fmt.Errorf("ssh: server provided gex g is not safe") + } + + // Send GexInit + pHalf := new(big.Int).Rsh(msg.P, 1) + x, err := rand.Int(randSource, pHalf) + if err != nil { + return nil, err + } + X := new(big.Int).Exp(msg.G, x, msg.P) + kexDHGexInit := kexDHGexInitMsg{ + X: X, + } + if err := c.writePacket(Marshal(&kexDHGexInit)); err != nil { + return nil, err + } + + // Receive GexReply + packet, err = c.readPacket() + if err != nil { + return nil, err + } + + var kexDHGexReply kexDHGexReplyMsg + if err = Unmarshal(packet, &kexDHGexReply); err != nil { + return nil, err + } + + if kexDHGexReply.Y.Cmp(bigOne) <= 0 || kexDHGexReply.Y.Cmp(pMinusOne) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") + } + kInt := new(big.Int).Exp(kexDHGexReply.Y, x, msg.P) + + // Check if k is safe by verifying that k > 1 and k < p - 1 + if kInt.Cmp(bigOne) <= 0 || kInt.Cmp(pMinusOne) >= 0 { + return nil, fmt.Errorf("ssh: derived k is not safe") + } + + h := gex.hashFunc.New() + magics.write(h) + writeString(h, kexDHGexReply.HostKey) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) + writeInt(h, msg.P) + writeInt(h, msg.G) + writeInt(h, X) + writeInt(h, kexDHGexReply.Y) + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: kexDHGexReply.HostKey, + Signature: kexDHGexReply.Signature, + Hash: gex.hashFunc, + }, nil +} + +// Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. +// +// This is a minimal implementation to satisfy the automated tests. +func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { + // Receive GexRequest + packet, err := c.readPacket() + if err != nil { + return + } + var kexDHGexRequest kexDHGexRequestMsg + if err = Unmarshal(packet, &kexDHGexRequest); err != nil { + return + } + + // Send GexGroup + // This is the group called diffie-hellman-group14-sha1 in RFC + // 4253 and Oakley Group 14 in RFC 3526. 
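+	//
+	// (Editorial note: this minimal server ignores the client's
+	// min/preferred/max bit-size request and always answers with the
+	// fixed 2048-bit Oakley Group 14 below. It also hashes the package
+	// constants rather than the peer's requested values, so it only
+	// interoperates with clients that send exactly those defaults.)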
+ p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) + g := big.NewInt(2) + + msg := &kexDHGexGroupMsg{ + P: p, + G: g, + } + if err := c.writePacket(Marshal(msg)); err != nil { + return nil, err + } + + // Receive GexInit + packet, err = c.readPacket() + if err != nil { + return + } + var kexDHGexInit kexDHGexInitMsg + if err = Unmarshal(packet, &kexDHGexInit); err != nil { + return + } + + pHalf := new(big.Int).Rsh(p, 1) + + y, err := rand.Int(randSource, pHalf) + if err != nil { + return + } + Y := new(big.Int).Exp(g, y, p) + + pMinusOne := new(big.Int).Sub(p, bigOne) + if kexDHGexInit.X.Cmp(bigOne) <= 0 || kexDHGexInit.X.Cmp(pMinusOne) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") + } + kInt := new(big.Int).Exp(kexDHGexInit.X, y, p) + + hostKeyBytes := priv.PublicKey().Marshal() + + h := gex.hashFunc.New() + magics.write(h) + writeString(h, hostKeyBytes) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) + writeInt(h, p) + writeInt(h, g) + writeInt(h, kexDHGexInit.X) + writeInt(h, Y) + + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. + sig, err := signAndMarshal(priv, randSource, H, algo) + if err != nil { + return nil, err + } + + kexDHGexReply := kexDHGexReplyMsg{ + HostKey: hostKeyBytes, + Y: Y, + Signature: sig, + } + packet = Marshal(&kexDHGexReply) + + err = c.writePacket(packet) + + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: gex.hashFunc, + }, err +} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go new file mode 100644 index 000000000..98e6706d5 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -0,0 +1,1778 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/md5" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "strings" + + "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" +) + +// Public key algorithms names. These values can appear in PublicKey.Type, +// ClientConfig.HostKeyAlgorithms, Signature.Format, or as AlgorithmSigner +// arguments. 
+const ( + KeyAlgoRSA = "ssh-rsa" + KeyAlgoDSA = "ssh-dss" + KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" + KeyAlgoSKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com" + KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" + KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" + KeyAlgoED25519 = "ssh-ed25519" + KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com" + + // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, not + // public key formats, so they can't appear as a PublicKey.Type. The + // corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2. + KeyAlgoRSASHA256 = "rsa-sha2-256" + KeyAlgoRSASHA512 = "rsa-sha2-512" +) + +const ( + // Deprecated: use KeyAlgoRSA. + SigAlgoRSA = KeyAlgoRSA + // Deprecated: use KeyAlgoRSASHA256. + SigAlgoRSASHA2256 = KeyAlgoRSASHA256 + // Deprecated: use KeyAlgoRSASHA512. + SigAlgoRSASHA2512 = KeyAlgoRSASHA512 +) + +// parsePubKey parses a public key of the given algorithm. +// Use ParsePublicKey for keys with prepended algorithm. +func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { + switch algo { + case KeyAlgoRSA: + return parseRSA(in) + case KeyAlgoDSA: + return parseDSA(in) + case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: + return parseECDSA(in) + case KeyAlgoSKECDSA256: + return parseSKECDSA(in) + case KeyAlgoED25519: + return parseED25519(in) + case KeyAlgoSKED25519: + return parseSKEd25519(in) + case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: + cert, err := parseCert(in, certKeyAlgoNames[algo]) + if err != nil { + return nil, nil, err + } + return cert, nil, nil + } + return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) +} + +// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format +// (see sshd(8) manual page) once the options and key type fields have been +// removed. +func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) { + in = bytes.TrimSpace(in) + + i := bytes.IndexAny(in, " \t") + if i == -1 { + i = len(in) + } + base64Key := in[:i] + + key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key))) + n, err := base64.StdEncoding.Decode(key, base64Key) + if err != nil { + return nil, "", err + } + key = key[:n] + out, err = ParsePublicKey(key) + if err != nil { + return nil, "", err + } + comment = string(bytes.TrimSpace(in[i:])) + return out, comment, nil +} + +// ParseKnownHosts parses an entry in the format of the known_hosts file. +// +// The known_hosts format is documented in the sshd(8) manual page. This +// function will parse a single entry from in. On successful return, marker +// will contain the optional marker value (i.e. "cert-authority" or "revoked") +// or else be empty, hosts will contain the hosts that this entry matches, +// pubKey will contain the public key and comment will contain any trailing +// comment at the end of the line. See the sshd(8) manual page for the various +// forms that a host string can take. +// +// The unparsed remainder of the input will be returned in rest. This function +// can be called repeatedly to parse multiple entries. +// +// If no entries were found in the input then err will be io.EOF. Otherwise a +// non-nil err value indicates a parse error. 
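+//
+// A minimal usage sketch (editorial addition, not part of the upstream
+// file; the path below is illustrative): parse every entry by calling
+// ParseKnownHosts in a loop until it returns io.EOF.
+//
+//	rest, err := os.ReadFile("testdata/known_hosts") // hypothetical input
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for {
+//		marker, hosts, pubKey, comment, next, err := ssh.ParseKnownHosts(rest)
+//		if err == io.EOF {
+//			break // no more entries
+//		}
+//		if err != nil {
+//			log.Fatal(err) // malformed entry
+//		}
+//		fmt.Println(marker, hosts, pubKey.Type(), comment)
+//		rest = next
+//	}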
+func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) { + for len(in) > 0 { + end := bytes.IndexByte(in, '\n') + if end != -1 { + rest = in[end+1:] + in = in[:end] + } else { + rest = nil + } + + end = bytes.IndexByte(in, '\r') + if end != -1 { + in = in[:end] + } + + in = bytes.TrimSpace(in) + if len(in) == 0 || in[0] == '#' { + in = rest + continue + } + + i := bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + // Strip out the beginning of the known_host key. + // This is either an optional marker or a (set of) hostname(s). + keyFields := bytes.Fields(in) + if len(keyFields) < 3 || len(keyFields) > 5 { + return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data") + } + + // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated + // list of hosts + marker := "" + if keyFields[0][0] == '@' { + marker = string(keyFields[0][1:]) + keyFields = keyFields[1:] + } + + hosts := string(keyFields[0]) + // keyFields[1] contains the key type (e.g. “ssh-rsa”). + // However, that information is duplicated inside the + // base64-encoded key and so is ignored here. + + key := bytes.Join(keyFields[2:], []byte(" ")) + if pubKey, comment, err = parseAuthorizedKey(key); err != nil { + return "", nil, nil, "", nil, err + } + + return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil + } + + return "", nil, nil, "", nil, io.EOF +} + +// ParseAuthorizedKey parses a public key from an authorized_keys +// file used in OpenSSH according to the sshd(8) manual page. +func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { + for len(in) > 0 { + end := bytes.IndexByte(in, '\n') + if end != -1 { + rest = in[end+1:] + in = in[:end] + } else { + rest = nil + } + + end = bytes.IndexByte(in, '\r') + if end != -1 { + in = in[:end] + } + + in = bytes.TrimSpace(in) + if len(in) == 0 || in[0] == '#' { + in = rest + continue + } + + i := bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { + return out, comment, options, rest, nil + } + + // No key type recognised. Maybe there's an options field at + // the beginning. + var b byte + inQuote := false + var candidateOptions []string + optionStart := 0 + for i, b = range in { + isEnd := !inQuote && (b == ' ' || b == '\t') + if (b == ',' && !inQuote) || isEnd { + if i-optionStart > 0 { + candidateOptions = append(candidateOptions, string(in[optionStart:i])) + } + optionStart = i + 1 + } + if isEnd { + break + } + if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { + inQuote = !inQuote + } + } + for i < len(in) && (in[i] == ' ' || in[i] == '\t') { + i++ + } + if i == len(in) { + // Invalid line: unmatched quote + in = rest + continue + } + + in = in[i:] + i = bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { + options = candidateOptions + return out, comment, options, rest, nil + } + + in = rest + continue + } + + return nil, "", nil, nil, errors.New("ssh: no key found") +} + +// ParsePublicKey parses an SSH public key formatted for use in +// the SSH wire protocol according to RFC 4253, section 6.6. 
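+//
+// For example (editorial sketch; signer is an ssh.Signer assumed from
+// context), a key survives a round trip through the wire format:
+//
+//	wire := signer.PublicKey().Marshal()
+//	pub, err := ssh.ParsePublicKey(wire)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(pub.Type()) // same as signer.PublicKey().Type()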
+func ParsePublicKey(in []byte) (out PublicKey, err error) { + algo, in, ok := parseString(in) + if !ok { + return nil, errShortRead + } + var rest []byte + out, rest, err = parsePubKey(in, string(algo)) + if len(rest) > 0 { + return nil, errors.New("ssh: trailing junk in public key") + } + + return out, err +} + +// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH +// authorized_keys file. The return value ends with newline. +func MarshalAuthorizedKey(key PublicKey) []byte { + b := &bytes.Buffer{} + b.WriteString(key.Type()) + b.WriteByte(' ') + e := base64.NewEncoder(base64.StdEncoding, b) + e.Write(key.Marshal()) + e.Close() + b.WriteByte('\n') + return b.Bytes() +} + +// MarshalPrivateKey returns a PEM block with the private key serialized in the +// OpenSSH format. +func MarshalPrivateKey(key crypto.PrivateKey, comment string) (*pem.Block, error) { + return marshalOpenSSHPrivateKey(key, comment, unencryptedOpenSSHMarshaler) +} + +// MarshalPrivateKeyWithPassphrase returns a PEM block holding the encrypted +// private key serialized in the OpenSSH format. +func MarshalPrivateKeyWithPassphrase(key crypto.PrivateKey, comment string, passphrase []byte) (*pem.Block, error) { + return marshalOpenSSHPrivateKey(key, comment, passphraseProtectedOpenSSHMarshaler(passphrase)) +} + +// PublicKey represents a public key using an unspecified algorithm. +// +// Some PublicKeys provided by this package also implement CryptoPublicKey. +type PublicKey interface { + // Type returns the key format name, e.g. "ssh-rsa". + Type() string + + // Marshal returns the serialized key data in SSH wire format, with the name + // prefix. To unmarshal the returned data, use the ParsePublicKey function. + Marshal() []byte + + // Verify that sig is a signature on the given data using this key. This + // method will hash the data appropriately first. sig.Format is allowed to + // be any signature algorithm compatible with the key type, the caller + // should check if it has more stringent requirements. + Verify(data []byte, sig *Signature) error +} + +// CryptoPublicKey, if implemented by a PublicKey, +// returns the underlying crypto.PublicKey form of the key. +type CryptoPublicKey interface { + CryptoPublicKey() crypto.PublicKey +} + +// A Signer can create signatures that verify against a public key. +// +// Some Signers provided by this package also implement MultiAlgorithmSigner. +type Signer interface { + // PublicKey returns the associated PublicKey. + PublicKey() PublicKey + + // Sign returns a signature for the given data. This method will hash the + // data appropriately first. The signature algorithm is expected to match + // the key format returned by the PublicKey.Type method (and not to be any + // alternative algorithm supported by the key format). + Sign(rand io.Reader, data []byte) (*Signature, error) +} + +// An AlgorithmSigner is a Signer that also supports specifying an algorithm to +// use for signing. +// +// An AlgorithmSigner can't advertise the algorithms it supports, unless it also +// implements MultiAlgorithmSigner, so it should be prepared to be invoked with +// every algorithm supported by the public key format. +type AlgorithmSigner interface { + Signer + + // SignWithAlgorithm is like Signer.Sign, but allows specifying a desired + // signing algorithm. Callers may pass an empty string for the algorithm in + // which case the AlgorithmSigner will use a default algorithm. This default + // doesn't currently control any behavior in this package. 
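+	//
+	// For instance (editorial sketch; as is an AlgorithmSigner and data
+	// a message, both assumed from context): a signer whose
+	// PublicKey().Type() is "ssh-rsa" can typically be asked for an
+	// RFC 8332 SHA-256 signature with
+	//
+	//	sig, err := as.SignWithAlgorithm(rand.Reader, data, ssh.KeyAlgoRSASHA256)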
+ SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) +} + +// MultiAlgorithmSigner is an AlgorithmSigner that also reports the algorithms +// supported by that signer. +type MultiAlgorithmSigner interface { + AlgorithmSigner + + // Algorithms returns the available algorithms in preference order. The list + // must not be empty, and it must not include certificate types. + Algorithms() []string +} + +// NewSignerWithAlgorithms returns a signer restricted to the specified +// algorithms. The algorithms must be set in preference order. The list must not +// be empty, and it must not include certificate types. An error is returned if +// the specified algorithms are incompatible with the public key type. +func NewSignerWithAlgorithms(signer AlgorithmSigner, algorithms []string) (MultiAlgorithmSigner, error) { + if len(algorithms) == 0 { + return nil, errors.New("ssh: please specify at least one valid signing algorithm") + } + var signerAlgos []string + supportedAlgos := algorithmsForKeyFormat(underlyingAlgo(signer.PublicKey().Type())) + if s, ok := signer.(*multiAlgorithmSigner); ok { + signerAlgos = s.Algorithms() + } else { + signerAlgos = supportedAlgos + } + + for _, algo := range algorithms { + if !contains(supportedAlgos, algo) { + return nil, fmt.Errorf("ssh: algorithm %q is not supported for key type %q", + algo, signer.PublicKey().Type()) + } + if !contains(signerAlgos, algo) { + return nil, fmt.Errorf("ssh: algorithm %q is restricted for the provided signer", algo) + } + } + return &multiAlgorithmSigner{ + AlgorithmSigner: signer, + supportedAlgorithms: algorithms, + }, nil +} + +type multiAlgorithmSigner struct { + AlgorithmSigner + supportedAlgorithms []string +} + +func (s *multiAlgorithmSigner) Algorithms() []string { + return s.supportedAlgorithms +} + +func (s *multiAlgorithmSigner) isAlgorithmSupported(algorithm string) bool { + if algorithm == "" { + algorithm = underlyingAlgo(s.PublicKey().Type()) + } + for _, algo := range s.supportedAlgorithms { + if algorithm == algo { + return true + } + } + return false +} + +func (s *multiAlgorithmSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + if !s.isAlgorithmSupported(algorithm) { + return nil, fmt.Errorf("ssh: algorithm %q is not supported: %v", algorithm, s.supportedAlgorithms) + } + return s.AlgorithmSigner.SignWithAlgorithm(rand, data, algorithm) +} + +type rsaPublicKey rsa.PublicKey + +func (r *rsaPublicKey) Type() string { + return "ssh-rsa" +} + +// parseRSA parses an RSA key according to RFC 4253, section 6.6. +func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + E *big.Int + N *big.Int + Rest []byte `ssh:"rest"` + } + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + if w.E.BitLen() > 24 { + return nil, nil, errors.New("ssh: exponent too large") + } + e := w.E.Int64() + if e < 3 || e&1 == 0 { + return nil, nil, errors.New("ssh: incorrect exponent") + } + + var key rsa.PublicKey + key.E = int(e) + key.N = w.N + return (*rsaPublicKey)(&key), w.Rest, nil +} + +func (r *rsaPublicKey) Marshal() []byte { + e := new(big.Int).SetInt64(int64(r.E)) + // RSA publickey struct layout should match the struct used by + // parseRSACert in the x/crypto/ssh/agent package. 
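+	// (Editorial note: per RFC 4253, Section 6.6, the "ssh-rsa" wire
+	// encoding is the algorithm name followed by mpint e and mpint n,
+	// which is the field order of the struct below.)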
+	wirekey := struct {
+		Name string
+		E    *big.Int
+		N    *big.Int
+	}{
+		KeyAlgoRSA,
+		e,
+		r.N,
+	}
+	return Marshal(&wirekey)
+}
+
+func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
+	supportedAlgos := algorithmsForKeyFormat(r.Type())
+	if !contains(supportedAlgos, sig.Format) {
+		return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
+	}
+	hash := hashFuncs[sig.Format]
+	h := hash.New()
+	h.Write(data)
+	digest := h.Sum(nil)
+
+	// Signatures in PKCS1v15 must match the key's modulus in
+	// length. However with SSH, some signers provide RSA
+	// signatures which are missing the MSB 0's of the bignum
+	// represented. With ssh-rsa signatures, this is encouraged by
+	// the spec (even though e.g. OpenSSH will give the full
+	// length unconditionally). With rsa-sha2-* signatures, the
+	// verifier is allowed to support these, even though they are
+	// out of spec. See RFC 4253 Section 6.6 for ssh-rsa and RFC
+	// 8332 Section 3 for rsa-sha2-* details.
+	//
+	// In practice:
+	// * OpenSSH always allows "short" signatures:
+	//   https://github.com/openssh/openssh-portable/blob/V_9_8_P1/ssh-rsa.c#L526
+	//   but always generates padded signatures:
+	//   https://github.com/openssh/openssh-portable/blob/V_9_8_P1/ssh-rsa.c#L439
+	//
+	// * PuTTY versions 0.81 and earlier will generate short
+	//   signatures for all RSA signature variants. Note that
+	//   PuTTY is embedded in other software, such as WinSCP and
+	//   FileZilla. At the time of writing, a patch has been
+	//   applied to PuTTY to generate padded signatures for
+	//   rsa-sha2-*, but not yet released:
+	//   https://git.tartarus.org/?p=simon/putty.git;a=commitdiff;h=a5bcf3d384e1bf15a51a6923c3724cbbee022d8e
+	//
+	// * SSH.NET versions 2024.0.0 and earlier will generate short
+	//   signatures for all RSA signature variants, fixed in 2024.1.0:
+	//   https://github.com/sshnet/SSH.NET/releases/tag/2024.1.0
+	//
+	// As a result, we pad these up to the key size by inserting
+	// leading 0's.
+	//
+	// Note that support for short signatures with rsa-sha2-* may
+	// be removed in the future due to such signatures not being
+	// allowed by the spec.
+	blob := sig.Blob
+	keySize := (*rsa.PublicKey)(r).Size()
+	if len(blob) < keySize {
+		padded := make([]byte, keySize)
+		copy(padded[keySize-len(blob):], blob)
+		blob = padded
+	}
+	return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), hash, digest, blob)
+}
+
+func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
+	return (*rsa.PublicKey)(r)
+}
+
+type dsaPublicKey dsa.PublicKey
+
+func (k *dsaPublicKey) Type() string {
+	return "ssh-dss"
+}
+
+func checkDSAParams(param *dsa.Parameters) error {
+	// SSH specifies FIPS 186-2, which only provided a single size
+	// (1024 bits) DSA key. FIPS 186-3 allows for larger key
+	// sizes, which would confuse SSH.
+	if l := param.P.BitLen(); l != 1024 {
+		return fmt.Errorf("ssh: unsupported DSA key size %d", l)
+	}
+
+	return nil
+}
+
+// parseDSA parses a DSA key according to RFC 4253, section 6.6.
+func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + P, Q, G, Y *big.Int + Rest []byte `ssh:"rest"` + } + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + param := dsa.Parameters{ + P: w.P, + Q: w.Q, + G: w.G, + } + if err := checkDSAParams(¶m); err != nil { + return nil, nil, err + } + + key := &dsaPublicKey{ + Parameters: param, + Y: w.Y, + } + return key, w.Rest, nil +} + +func (k *dsaPublicKey) Marshal() []byte { + // DSA publickey struct layout should match the struct used by + // parseDSACert in the x/crypto/ssh/agent package. + w := struct { + Name string + P, Q, G, Y *big.Int + }{ + k.Type(), + k.P, + k.Q, + k.G, + k.Y, + } + + return Marshal(&w) +} + +func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + h := hashFuncs[sig.Format].New() + h.Write(data) + digest := h.Sum(nil) + + // Per RFC 4253, section 6.6, + // The value for 'dss_signature_blob' is encoded as a string containing + // r, followed by s (which are 160-bit integers, without lengths or + // padding, unsigned, and in network byte order). + // For DSS purposes, sig.Blob should be exactly 40 bytes in length. + if len(sig.Blob) != 40 { + return errors.New("ssh: DSA signature parse error") + } + r := new(big.Int).SetBytes(sig.Blob[:20]) + s := new(big.Int).SetBytes(sig.Blob[20:]) + if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*dsa.PublicKey)(k) +} + +type dsaPrivateKey struct { + *dsa.PrivateKey +} + +func (k *dsaPrivateKey) PublicKey() PublicKey { + return (*dsaPublicKey)(&k.PrivateKey.PublicKey) +} + +func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { + return k.SignWithAlgorithm(rand, data, k.PublicKey().Type()) +} + +func (k *dsaPrivateKey) Algorithms() []string { + return []string{k.PublicKey().Type()} +} + +func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + if algorithm != "" && algorithm != k.PublicKey().Type() { + return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) + } + + h := hashFuncs[k.PublicKey().Type()].New() + h.Write(data) + digest := h.Sum(nil) + r, s, err := dsa.Sign(rand, k.PrivateKey, digest) + if err != nil { + return nil, err + } + + sig := make([]byte, 40) + rb := r.Bytes() + sb := s.Bytes() + + copy(sig[20-len(rb):20], rb) + copy(sig[40-len(sb):], sb) + + return &Signature{ + Format: k.PublicKey().Type(), + Blob: sig, + }, nil +} + +type ecdsaPublicKey ecdsa.PublicKey + +func (k *ecdsaPublicKey) Type() string { + return "ecdsa-sha2-" + k.nistID() +} + +func (k *ecdsaPublicKey) nistID() string { + switch k.Params().BitSize { + case 256: + return "nistp256" + case 384: + return "nistp384" + case 521: + return "nistp521" + } + panic("ssh: unsupported ecdsa key size") +} + +type ed25519PublicKey ed25519.PublicKey + +func (k ed25519PublicKey) Type() string { + return KeyAlgoED25519 +} + +func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + if l := len(w.KeyBytes); l != ed25519.PublicKeySize { + return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) + } + + return 
ed25519PublicKey(w.KeyBytes), w.Rest, nil +} + +func (k ed25519PublicKey) Marshal() []byte { + w := struct { + Name string + KeyBytes []byte + }{ + KeyAlgoED25519, + []byte(k), + } + return Marshal(&w) +} + +func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + if l := len(k); l != ed25519.PublicKeySize { + return fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) + } + + if ok := ed25519.Verify(ed25519.PublicKey(k), b, sig.Blob); !ok { + return errors.New("ssh: signature did not verify") + } + + return nil +} + +func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { + return ed25519.PublicKey(k) +} + +func supportedEllipticCurve(curve elliptic.Curve) bool { + return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() +} + +// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. +func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + Curve string + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := new(ecdsa.PublicKey) + + switch w.Curve { + case "nistp256": + key.Curve = elliptic.P256() + case "nistp384": + key.Curve = elliptic.P384() + case "nistp521": + key.Curve = elliptic.P521() + default: + return nil, nil, errors.New("ssh: unsupported curve") + } + + key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) + if key.X == nil || key.Y == nil { + return nil, nil, errors.New("ssh: invalid curve point") + } + return (*ecdsaPublicKey)(key), w.Rest, nil +} + +func (k *ecdsaPublicKey) Marshal() []byte { + // See RFC 5656, section 3.1. + keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) + // ECDSA publickey struct layout should match the struct used by + // parseECDSACert in the x/crypto/ssh/agent package. + w := struct { + Name string + ID string + Key []byte + }{ + k.Type(), + k.nistID(), + keyBytes, + } + + return Marshal(&w) +} + +func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + + h := hashFuncs[sig.Format].New() + h.Write(data) + digest := h.Sum(nil) + + // Per RFC 5656, section 3.1.2, + // The ecdsa_signature_blob value has the following specific encoding: + // mpint r + // mpint s + var ecSig struct { + R *big.Int + S *big.Int + } + + if err := Unmarshal(sig.Blob, &ecSig); err != nil { + return err + } + + if ecdsa.Verify((*ecdsa.PublicKey)(k), digest, ecSig.R, ecSig.S) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*ecdsa.PublicKey)(k) +} + +// skFields holds the additional fields present in U2F/FIDO2 signatures. +// See openssh/PROTOCOL.u2f 'SSH U2F Signatures' for details. +type skFields struct { + // Flags contains U2F/FIDO2 flags such as 'user present' + Flags byte + // Counter is a monotonic signature counter which can be + // used to detect concurrent use of a private key, should + // it be extracted from hardware. + Counter uint32 +} + +type skECDSAPublicKey struct { + // application is a URL-like string, typically "ssh:" for SSH. + // see openssh/PROTOCOL.u2f for details. 
+ application string + ecdsa.PublicKey +} + +func (k *skECDSAPublicKey) Type() string { + return KeyAlgoSKECDSA256 +} + +func (k *skECDSAPublicKey) nistID() string { + return "nistp256" +} + +func parseSKECDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + Curve string + KeyBytes []byte + Application string + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := new(skECDSAPublicKey) + key.application = w.Application + + if w.Curve != "nistp256" { + return nil, nil, errors.New("ssh: unsupported curve") + } + key.Curve = elliptic.P256() + + key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) + if key.X == nil || key.Y == nil { + return nil, nil, errors.New("ssh: invalid curve point") + } + + return key, w.Rest, nil +} + +func (k *skECDSAPublicKey) Marshal() []byte { + // See RFC 5656, section 3.1. + keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) + w := struct { + Name string + ID string + Key []byte + Application string + }{ + k.Type(), + k.nistID(), + keyBytes, + k.application, + } + + return Marshal(&w) +} + +func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + + h := hashFuncs[sig.Format].New() + h.Write([]byte(k.application)) + appDigest := h.Sum(nil) + + h.Reset() + h.Write(data) + dataDigest := h.Sum(nil) + + var ecSig struct { + R *big.Int + S *big.Int + } + if err := Unmarshal(sig.Blob, &ecSig); err != nil { + return err + } + + var skf skFields + if err := Unmarshal(sig.Rest, &skf); err != nil { + return err + } + + blob := struct { + ApplicationDigest []byte `ssh:"rest"` + Flags byte + Counter uint32 + MessageDigest []byte `ssh:"rest"` + }{ + appDigest, + skf.Flags, + skf.Counter, + dataDigest, + } + + original := Marshal(blob) + + h.Reset() + h.Write(original) + digest := h.Sum(nil) + + if ecdsa.Verify((*ecdsa.PublicKey)(&k.PublicKey), digest, ecSig.R, ecSig.S) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *skECDSAPublicKey) CryptoPublicKey() crypto.PublicKey { + return &k.PublicKey +} + +type skEd25519PublicKey struct { + // application is a URL-like string, typically "ssh:" for SSH. + // see openssh/PROTOCOL.u2f for details. 
+ application string + ed25519.PublicKey +} + +func (k *skEd25519PublicKey) Type() string { + return KeyAlgoSKED25519 +} + +func parseSKEd25519(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + KeyBytes []byte + Application string + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + if l := len(w.KeyBytes); l != ed25519.PublicKeySize { + return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) + } + + key := new(skEd25519PublicKey) + key.application = w.Application + key.PublicKey = ed25519.PublicKey(w.KeyBytes) + + return key, w.Rest, nil +} + +func (k *skEd25519PublicKey) Marshal() []byte { + w := struct { + Name string + KeyBytes []byte + Application string + }{ + KeyAlgoSKED25519, + []byte(k.PublicKey), + k.application, + } + return Marshal(&w) +} + +func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + if l := len(k.PublicKey); l != ed25519.PublicKeySize { + return fmt.Errorf("invalid size %d for Ed25519 public key", l) + } + + h := hashFuncs[sig.Format].New() + h.Write([]byte(k.application)) + appDigest := h.Sum(nil) + + h.Reset() + h.Write(data) + dataDigest := h.Sum(nil) + + var edSig struct { + Signature []byte `ssh:"rest"` + } + + if err := Unmarshal(sig.Blob, &edSig); err != nil { + return err + } + + var skf skFields + if err := Unmarshal(sig.Rest, &skf); err != nil { + return err + } + + blob := struct { + ApplicationDigest []byte `ssh:"rest"` + Flags byte + Counter uint32 + MessageDigest []byte `ssh:"rest"` + }{ + appDigest, + skf.Flags, + skf.Counter, + dataDigest, + } + + original := Marshal(blob) + + if ok := ed25519.Verify(k.PublicKey, original, edSig.Signature); !ok { + return errors.New("ssh: signature did not verify") + } + + return nil +} + +func (k *skEd25519PublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, +// *ecdsa.PrivateKey or any other crypto.Signer and returns a +// corresponding Signer instance. ECDSA keys must use P-256, P-384 or +// P-521. DSA keys must use parameter size L1024N160. +func NewSignerFromKey(key interface{}) (Signer, error) { + switch key := key.(type) { + case crypto.Signer: + return NewSignerFromSigner(key) + case *dsa.PrivateKey: + return newDSAPrivateKey(key) + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } +} + +func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { + if err := checkDSAParams(&key.PublicKey.Parameters); err != nil { + return nil, err + } + + return &dsaPrivateKey{key}, nil +} + +type wrappedSigner struct { + signer crypto.Signer + pubKey PublicKey +} + +// NewSignerFromSigner takes any crypto.Signer implementation and +// returns a corresponding Signer interface. This can be used, for +// example, with keys kept in hardware modules. 
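+//
+// A short sketch (editorial addition, with an in-memory key standing in
+// for a hardware-backed crypto.Signer):
+//
+//	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	signer, err := ssh.NewSignerFromSigner(priv)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(signer.PublicKey().Type()) // "ecdsa-sha2-nistp256"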
+func NewSignerFromSigner(signer crypto.Signer) (Signer, error) {
+	pubKey, err := NewPublicKey(signer.Public())
+	if err != nil {
+		return nil, err
+	}
+
+	return &wrappedSigner{signer, pubKey}, nil
+}
+
+func (s *wrappedSigner) PublicKey() PublicKey {
+	return s.pubKey
+}
+
+func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
+	return s.SignWithAlgorithm(rand, data, s.pubKey.Type())
+}
+
+func (s *wrappedSigner) Algorithms() []string {
+	return algorithmsForKeyFormat(s.pubKey.Type())
+}
+
+func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
+	if algorithm == "" {
+		algorithm = s.pubKey.Type()
+	}
+
+	if !contains(s.Algorithms(), algorithm) {
+		return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type())
+	}
+
+	hashFunc := hashFuncs[algorithm]
+	var digest []byte
+	if hashFunc != 0 {
+		h := hashFunc.New()
+		h.Write(data)
+		digest = h.Sum(nil)
+	} else {
+		digest = data
+	}
+
+	signature, err := s.signer.Sign(rand, digest, hashFunc)
+	if err != nil {
+		return nil, err
+	}
+
+	// crypto.Signer.Sign is expected to return an ASN.1-encoded signature
+	// for ECDSA and DSA, but that's not the encoding expected by SSH, so
+	// re-encode.
+	switch s.pubKey.(type) {
+	case *ecdsaPublicKey, *dsaPublicKey:
+		type asn1Signature struct {
+			R, S *big.Int
+		}
+		asn1Sig := new(asn1Signature)
+		_, err := asn1.Unmarshal(signature, asn1Sig)
+		if err != nil {
+			return nil, err
+		}
+
+		switch s.pubKey.(type) {
+		case *ecdsaPublicKey:
+			signature = Marshal(asn1Sig)
+
+		case *dsaPublicKey:
+			signature = make([]byte, 40)
+			r := asn1Sig.R.Bytes()
+			s := asn1Sig.S.Bytes()
+			copy(signature[20-len(r):20], r)
+			copy(signature[40-len(s):40], s)
+		}
+	}
+
+	return &Signature{
+		Format: algorithm,
+		Blob:   signature,
+	}, nil
+}
+
+// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey,
+// or ed25519.PublicKey and returns a corresponding PublicKey instance.
+// ECDSA keys must use P-256, P-384 or P-521.
+func NewPublicKey(key interface{}) (PublicKey, error) {
+	switch key := key.(type) {
+	case *rsa.PublicKey:
+		return (*rsaPublicKey)(key), nil
+	case *ecdsa.PublicKey:
+		if !supportedEllipticCurve(key.Curve) {
+			return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported")
+		}
+		return (*ecdsaPublicKey)(key), nil
+	case *dsa.PublicKey:
+		return (*dsaPublicKey)(key), nil
+	case ed25519.PublicKey:
+		if l := len(key); l != ed25519.PublicKeySize {
+			return nil, fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l)
+		}
+		return ed25519PublicKey(key), nil
+	default:
+		return nil, fmt.Errorf("ssh: unsupported key type %T", key)
+	}
+}
+
+// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports
+// the same keys as ParseRawPrivateKey. If the private key is encrypted, it
+// will return a PassphraseMissingError.
+func ParsePrivateKey(pemBytes []byte) (Signer, error) {
+	key, err := ParseRawPrivateKey(pemBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewSignerFromKey(key)
+}
+
+// ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private
+// key and passphrase. It supports the same keys as
+// ParseRawPrivateKeyWithPassphrase.
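+//
+// A common pattern (editorial sketch; pemBytes and passphrase assumed
+// from context) is to try ParsePrivateKey first and fall back to this
+// function when the key turns out to be encrypted:
+//
+//	signer, err := ssh.ParsePrivateKey(pemBytes)
+//	if _, ok := err.(*ssh.PassphraseMissingError); ok {
+//		signer, err = ssh.ParsePrivateKeyWithPassphrase(pemBytes, passphrase)
+//	}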
+func ParsePrivateKeyWithPassphrase(pemBytes, passphrase []byte) (Signer, error) {
+	key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewSignerFromKey(key)
+}
+
+// encryptedBlock tells whether a private key is
+// encrypted by examining its Proc-Type header
+// for a mention of ENCRYPTED
+// according to RFC 1421 Section 4.6.1.1.
+func encryptedBlock(block *pem.Block) bool {
+	return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED")
+}
+
+// A PassphraseMissingError indicates that parsing this private key requires a
+// passphrase. Use ParsePrivateKeyWithPassphrase.
+type PassphraseMissingError struct {
+	// PublicKey will be set if the private key format includes an unencrypted
+	// public key along with the encrypted private key.
+	PublicKey PublicKey
+}
+
+func (*PassphraseMissingError) Error() string {
+	return "ssh: this private key is passphrase protected"
+}
+
+// ParseRawPrivateKey returns a private key from a PEM encoded private key. It supports
+// RSA, DSA, ECDSA, and Ed25519 private keys in PKCS#1, PKCS#8, OpenSSL, and OpenSSH
+// formats. If the private key is encrypted, it will return a PassphraseMissingError.
+func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
+	block, _ := pem.Decode(pemBytes)
+	if block == nil {
+		return nil, errors.New("ssh: no key found")
+	}
+
+	if encryptedBlock(block) {
+		return nil, &PassphraseMissingError{}
+	}
+
+	switch block.Type {
+	case "RSA PRIVATE KEY":
+		return x509.ParsePKCS1PrivateKey(block.Bytes)
+	// RFC5208 - https://tools.ietf.org/html/rfc5208
+	case "PRIVATE KEY":
+		return x509.ParsePKCS8PrivateKey(block.Bytes)
+	case "EC PRIVATE KEY":
+		return x509.ParseECPrivateKey(block.Bytes)
+	case "DSA PRIVATE KEY":
+		return ParseDSAPrivateKey(block.Bytes)
+	case "OPENSSH PRIVATE KEY":
+		return parseOpenSSHPrivateKey(block.Bytes, unencryptedOpenSSHKey)
+	default:
+		return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
+	}
+}
+
+// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with
+// passphrase from a PEM encoded private key. If the passphrase is wrong, it
+// will return x509.IncorrectPasswordError.
+func ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase []byte) (interface{}, error) {
+	block, _ := pem.Decode(pemBytes)
+	if block == nil {
+		return nil, errors.New("ssh: no key found")
+	}
+
+	if block.Type == "OPENSSH PRIVATE KEY" {
+		return parseOpenSSHPrivateKey(block.Bytes, passphraseProtectedOpenSSHKey(passphrase))
+	}
+
+	if !encryptedBlock(block) || !x509.IsEncryptedPEMBlock(block) {
+		return nil, errors.New("ssh: not an encrypted key")
+	}
+
+	buf, err := x509.DecryptPEMBlock(block, passphrase)
+	if err != nil {
+		if err == x509.IncorrectPasswordError {
+			return nil, err
+		}
+		return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err)
+	}
+
+	var result interface{}
+
+	switch block.Type {
+	case "RSA PRIVATE KEY":
+		result, err = x509.ParsePKCS1PrivateKey(buf)
+	case "EC PRIVATE KEY":
+		result, err = x509.ParseECPrivateKey(buf)
+	case "DSA PRIVATE KEY":
+		result, err = ParseDSAPrivateKey(buf)
+	default:
+		err = fmt.Errorf("ssh: unsupported key type %q", block.Type)
+	}
+	// Because of deficiencies in the format, DecryptPEMBlock does not always
+	// detect an incorrect password. In these cases the decrypted DER bytes are
+	// random noise. If the parsing of the key returns an asn1.StructuralError
+	// we return x509.IncorrectPasswordError.
+ if _, ok := err.(asn1.StructuralError); ok { + return nil, x509.IncorrectPasswordError + } + + return result, err +} + +// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as +// specified by the OpenSSL DSA man page. +func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { + var k struct { + Version int + P *big.Int + Q *big.Int + G *big.Int + Pub *big.Int + Priv *big.Int + } + rest, err := asn1.Unmarshal(der, &k) + if err != nil { + return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) + } + if len(rest) > 0 { + return nil, errors.New("ssh: garbage after DSA key") + } + + return &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, + Q: k.Q, + G: k.G, + }, + Y: k.Pub, + }, + X: k.Priv, + }, nil +} + +func unencryptedOpenSSHKey(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { + if kdfName != "none" || cipherName != "none" { + return nil, &PassphraseMissingError{} + } + if kdfOpts != "" { + return nil, errors.New("ssh: invalid openssh private key") + } + return privKeyBlock, nil +} + +func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc { + return func(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { + if kdfName == "none" || cipherName == "none" { + return nil, errors.New("ssh: key is not password protected") + } + if kdfName != "bcrypt" { + return nil, fmt.Errorf("ssh: unknown KDF %q, only supports %q", kdfName, "bcrypt") + } + + var opts struct { + Salt string + Rounds uint32 + } + if err := Unmarshal([]byte(kdfOpts), &opts); err != nil { + return nil, err + } + + k, err := bcrypt_pbkdf.Key(passphrase, []byte(opts.Salt), int(opts.Rounds), 32+16) + if err != nil { + return nil, err + } + key, iv := k[:32], k[32:] + + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + switch cipherName { + case "aes256-ctr": + ctr := cipher.NewCTR(c, iv) + ctr.XORKeyStream(privKeyBlock, privKeyBlock) + case "aes256-cbc": + if len(privKeyBlock)%c.BlockSize() != 0 { + return nil, fmt.Errorf("ssh: invalid encrypted private key length, not a multiple of the block size") + } + cbc := cipher.NewCBCDecrypter(c, iv) + cbc.CryptBlocks(privKeyBlock, privKeyBlock) + default: + return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q or %q", cipherName, "aes256-ctr", "aes256-cbc") + } + + return privKeyBlock, nil + } +} + +func unencryptedOpenSSHMarshaler(privKeyBlock []byte) ([]byte, string, string, string, error) { + key := generateOpenSSHPadding(privKeyBlock, 8) + return key, "none", "none", "", nil +} + +func passphraseProtectedOpenSSHMarshaler(passphrase []byte) openSSHEncryptFunc { + return func(privKeyBlock []byte) ([]byte, string, string, string, error) { + salt := make([]byte, 16) + if _, err := rand.Read(salt); err != nil { + return nil, "", "", "", err + } + + opts := struct { + Salt []byte + Rounds uint32 + }{salt, 16} + + // Derive key to encrypt the private key block. + k, err := bcrypt_pbkdf.Key(passphrase, salt, int(opts.Rounds), 32+aes.BlockSize) + if err != nil { + return nil, "", "", "", err + } + + // Add padding matching the block size of AES. + keyBlock := generateOpenSSHPadding(privKeyBlock, aes.BlockSize) + + // Encrypt the private key using the derived secret. 
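+		// (Editorial note: the 48 bytes derived via bcrypt_pbkdf split
+		// into a 32-byte AES-256 key and a 16-byte CTR IV below, matching
+		// the "aes256-ctr" and "bcrypt" names returned to the caller.)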
+ + dst := make([]byte, len(keyBlock)) + key, iv := k[:32], k[32:] + block, err := aes.NewCipher(key) + if err != nil { + return nil, "", "", "", err + } + + stream := cipher.NewCTR(block, iv) + stream.XORKeyStream(dst, keyBlock) + + return dst, "aes256-ctr", "bcrypt", string(Marshal(opts)), nil + } +} + +const privateKeyAuthMagic = "openssh-key-v1\x00" + +type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error) +type openSSHEncryptFunc func(PrivKeyBlock []byte) (ProtectedKeyBlock []byte, cipherName, kdfName, kdfOptions string, err error) + +type openSSHEncryptedPrivateKey struct { + CipherName string + KdfName string + KdfOpts string + NumKeys uint32 + PubKey []byte + PrivKeyBlock []byte +} + +type openSSHPrivateKey struct { + Check1 uint32 + Check2 uint32 + Keytype string + Rest []byte `ssh:"rest"` +} + +type openSSHRSAPrivateKey struct { + N *big.Int + E *big.Int + D *big.Int + Iqmp *big.Int + P *big.Int + Q *big.Int + Comment string + Pad []byte `ssh:"rest"` +} + +type openSSHEd25519PrivateKey struct { + Pub []byte + Priv []byte + Comment string + Pad []byte `ssh:"rest"` +} + +type openSSHECDSAPrivateKey struct { + Curve string + Pub []byte + D *big.Int + Comment string + Pad []byte `ssh:"rest"` +} + +// parseOpenSSHPrivateKey parses an OpenSSH private key, using the decrypt +// function to unwrap the encrypted portion. unencryptedOpenSSHKey can be used +// as the decrypt function to parse an unencrypted private key. See +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key. +func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.PrivateKey, error) { + if len(key) < len(privateKeyAuthMagic) || string(key[:len(privateKeyAuthMagic)]) != privateKeyAuthMagic { + return nil, errors.New("ssh: invalid openssh private key format") + } + remaining := key[len(privateKeyAuthMagic):] + + var w openSSHEncryptedPrivateKey + if err := Unmarshal(remaining, &w); err != nil { + return nil, err + } + if w.NumKeys != 1 { + // We only support single key files, and so does OpenSSH. 
+ // https://github.com/openssh/openssh-portable/blob/4103a3ec7/sshkey.c#L4171 + return nil, errors.New("ssh: multi-key files are not supported") + } + + privKeyBlock, err := decrypt(w.CipherName, w.KdfName, w.KdfOpts, w.PrivKeyBlock) + if err != nil { + if err, ok := err.(*PassphraseMissingError); ok { + pub, errPub := ParsePublicKey(w.PubKey) + if errPub != nil { + return nil, fmt.Errorf("ssh: failed to parse embedded public key: %v", errPub) + } + err.PublicKey = pub + } + return nil, err + } + + var pk1 openSSHPrivateKey + if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 { + if w.CipherName != "none" { + return nil, x509.IncorrectPasswordError + } + return nil, errors.New("ssh: malformed OpenSSH key") + } + + switch pk1.Keytype { + case KeyAlgoRSA: + var key openSSHRSAPrivateKey + if err := Unmarshal(pk1.Rest, &key); err != nil { + return nil, err + } + + if err := checkOpenSSHKeyPadding(key.Pad); err != nil { + return nil, err + } + + pk := &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + N: key.N, + E: int(key.E.Int64()), + }, + D: key.D, + Primes: []*big.Int{key.P, key.Q}, + } + + if err := pk.Validate(); err != nil { + return nil, err + } + + pk.Precompute() + + return pk, nil + case KeyAlgoED25519: + var key openSSHEd25519PrivateKey + if err := Unmarshal(pk1.Rest, &key); err != nil { + return nil, err + } + + if len(key.Priv) != ed25519.PrivateKeySize { + return nil, errors.New("ssh: private key unexpected length") + } + + if err := checkOpenSSHKeyPadding(key.Pad); err != nil { + return nil, err + } + + pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) + copy(pk, key.Priv) + return &pk, nil + case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: + var key openSSHECDSAPrivateKey + if err := Unmarshal(pk1.Rest, &key); err != nil { + return nil, err + } + + if err := checkOpenSSHKeyPadding(key.Pad); err != nil { + return nil, err + } + + var curve elliptic.Curve + switch key.Curve { + case "nistp256": + curve = elliptic.P256() + case "nistp384": + curve = elliptic.P384() + case "nistp521": + curve = elliptic.P521() + default: + return nil, errors.New("ssh: unhandled elliptic curve: " + key.Curve) + } + + X, Y := elliptic.Unmarshal(curve, key.Pub) + if X == nil || Y == nil { + return nil, errors.New("ssh: failed to unmarshal public key") + } + + if key.D.Cmp(curve.Params().N) >= 0 { + return nil, errors.New("ssh: scalar is out of range") + } + + x, y := curve.ScalarBaseMult(key.D.Bytes()) + if x.Cmp(X) != 0 || y.Cmp(Y) != 0 { + return nil, errors.New("ssh: public key does not match private key") + } + + return &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: curve, + X: X, + Y: Y, + }, + D: key.D, + }, nil + default: + return nil, errors.New("ssh: unhandled key type") + } +} + +func marshalOpenSSHPrivateKey(key crypto.PrivateKey, comment string, encrypt openSSHEncryptFunc) (*pem.Block, error) { + var w openSSHEncryptedPrivateKey + var pk1 openSSHPrivateKey + + // Random check bytes. + var check uint32 + if err := binary.Read(rand.Reader, binary.BigEndian, &check); err != nil { + return nil, err + } + + pk1.Check1 = check + pk1.Check2 = check + w.NumKeys = 1 + + // Use a []byte directly on ed25519 keys. + if k, ok := key.(*ed25519.PrivateKey); ok { + key = *k + } + + switch k := key.(type) { + case *rsa.PrivateKey: + E := new(big.Int).SetInt64(int64(k.PublicKey.E)) + // Marshal public key: + // E and N are in reversed order in the public and private key. 
+ pubKey := struct { + KeyType string + E *big.Int + N *big.Int + }{ + KeyAlgoRSA, + E, k.PublicKey.N, + } + w.PubKey = Marshal(pubKey) + + // Marshal private key. + key := openSSHRSAPrivateKey{ + N: k.PublicKey.N, + E: E, + D: k.D, + Iqmp: k.Precomputed.Qinv, + P: k.Primes[0], + Q: k.Primes[1], + Comment: comment, + } + pk1.Keytype = KeyAlgoRSA + pk1.Rest = Marshal(key) + case ed25519.PrivateKey: + pub := make([]byte, ed25519.PublicKeySize) + priv := make([]byte, ed25519.PrivateKeySize) + copy(pub, k[32:]) + copy(priv, k) + + // Marshal public key. + pubKey := struct { + KeyType string + Pub []byte + }{ + KeyAlgoED25519, pub, + } + w.PubKey = Marshal(pubKey) + + // Marshal private key. + key := openSSHEd25519PrivateKey{ + Pub: pub, + Priv: priv, + Comment: comment, + } + pk1.Keytype = KeyAlgoED25519 + pk1.Rest = Marshal(key) + case *ecdsa.PrivateKey: + var curve, keyType string + switch name := k.Curve.Params().Name; name { + case "P-256": + curve = "nistp256" + keyType = KeyAlgoECDSA256 + case "P-384": + curve = "nistp384" + keyType = KeyAlgoECDSA384 + case "P-521": + curve = "nistp521" + keyType = KeyAlgoECDSA521 + default: + return nil, errors.New("ssh: unhandled elliptic curve " + name) + } + + pub := elliptic.Marshal(k.Curve, k.PublicKey.X, k.PublicKey.Y) + + // Marshal public key. + pubKey := struct { + KeyType string + Curve string + Pub []byte + }{ + keyType, curve, pub, + } + w.PubKey = Marshal(pubKey) + + // Marshal private key. + key := openSSHECDSAPrivateKey{ + Curve: curve, + Pub: pub, + D: k.D, + Comment: comment, + } + pk1.Keytype = keyType + pk1.Rest = Marshal(key) + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", k) + } + + var err error + // Add padding and encrypt the key if necessary. + w.PrivKeyBlock, w.CipherName, w.KdfName, w.KdfOpts, err = encrypt(Marshal(pk1)) + if err != nil { + return nil, err + } + + b := Marshal(w) + block := &pem.Block{ + Type: "OPENSSH PRIVATE KEY", + Bytes: append([]byte(privateKeyAuthMagic), b...), + } + return block, nil +} + +func checkOpenSSHKeyPadding(pad []byte) error { + for i, b := range pad { + if int(b) != i+1 { + return errors.New("ssh: padding not as expected") + } + } + return nil +} + +func generateOpenSSHPadding(block []byte, blockSize int) []byte { + for i, l := 0, len(block); (l+i)%blockSize != 0; i++ { + block = append(block, byte(i+1)) + } + return block +} + +// FingerprintLegacyMD5 returns the user presentation of the key's +// fingerprint as described by RFC 4716 section 4. +func FingerprintLegacyMD5(pubKey PublicKey) string { + md5sum := md5.Sum(pubKey.Marshal()) + hexarray := make([]string, len(md5sum)) + for i, c := range md5sum { + hexarray[i] = hex.EncodeToString([]byte{c}) + } + return strings.Join(hexarray, ":") +} + +// FingerprintSHA256 returns the user presentation of the key's +// fingerprint as unpadded base64 encoded sha256 hash. +// This format was introduced from OpenSSH 6.8. +// https://www.openssh.com/txt/release-6.8 +// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) +func FingerprintSHA256(pubKey PublicKey) string { + sha256sum := sha256.Sum256(pubKey.Marshal()) + hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) + return "SHA256:" + hash +} diff --git a/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go b/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go new file mode 100644 index 000000000..7376a8dff --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go @@ -0,0 +1,540 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package knownhosts implements a parser for the OpenSSH known_hosts +// host key database, and provides utility functions for writing +// OpenSSH compliant known_hosts files. +package knownhosts + +import ( + "bufio" + "bytes" + "crypto/hmac" + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "errors" + "fmt" + "io" + "net" + "os" + "strings" + + "golang.org/x/crypto/ssh" +) + +// See the sshd manpage +// (http://man.openbsd.org/sshd#SSH_KNOWN_HOSTS_FILE_FORMAT) for +// background. + +type addr struct{ host, port string } + +func (a *addr) String() string { + h := a.host + if strings.Contains(h, ":") { + h = "[" + h + "]" + } + return h + ":" + a.port +} + +type matcher interface { + match(addr) bool +} + +type hostPattern struct { + negate bool + addr addr +} + +func (p *hostPattern) String() string { + n := "" + if p.negate { + n = "!" + } + + return n + p.addr.String() +} + +type hostPatterns []hostPattern + +func (ps hostPatterns) match(a addr) bool { + matched := false + for _, p := range ps { + if !p.match(a) { + continue + } + if p.negate { + return false + } + matched = true + } + return matched +} + +// See +// https://android.googlesource.com/platform/external/openssh/+/ab28f5495c85297e7a597c1ba62e996416da7c7e/addrmatch.c +// The matching of * has no regard for separators, unlike filesystem globs +func wildcardMatch(pat []byte, str []byte) bool { + for { + if len(pat) == 0 { + return len(str) == 0 + } + if len(str) == 0 { + return false + } + + if pat[0] == '*' { + if len(pat) == 1 { + return true + } + + for j := range str { + if wildcardMatch(pat[1:], str[j:]) { + return true + } + } + return false + } + + if pat[0] == '?' 
|| pat[0] == str[0] { + pat = pat[1:] + str = str[1:] + } else { + return false + } + } +} + +func (p *hostPattern) match(a addr) bool { + return wildcardMatch([]byte(p.addr.host), []byte(a.host)) && p.addr.port == a.port +} + +type keyDBLine struct { + cert bool + matcher matcher + knownKey KnownKey +} + +func serialize(k ssh.PublicKey) string { + return k.Type() + " " + base64.StdEncoding.EncodeToString(k.Marshal()) +} + +func (l *keyDBLine) match(a addr) bool { + return l.matcher.match(a) +} + +type hostKeyDB struct { + // Serialized version of revoked keys + revoked map[string]*KnownKey + lines []keyDBLine +} + +func newHostKeyDB() *hostKeyDB { + db := &hostKeyDB{ + revoked: make(map[string]*KnownKey), + } + + return db +} + +func keyEq(a, b ssh.PublicKey) bool { + return bytes.Equal(a.Marshal(), b.Marshal()) +} + +// IsHostAuthority can be used as a callback in ssh.CertChecker +func (db *hostKeyDB) IsHostAuthority(remote ssh.PublicKey, address string) bool { + h, p, err := net.SplitHostPort(address) + if err != nil { + return false + } + a := addr{host: h, port: p} + + for _, l := range db.lines { + if l.cert && keyEq(l.knownKey.Key, remote) && l.match(a) { + return true + } + } + return false +} + +// IsRevoked can be used as a callback in ssh.CertChecker +func (db *hostKeyDB) IsRevoked(key *ssh.Certificate) bool { + _, ok := db.revoked[string(key.Marshal())] + return ok +} + +const markerCert = "@cert-authority" +const markerRevoked = "@revoked" + +func nextWord(line []byte) (string, []byte) { + i := bytes.IndexAny(line, "\t ") + if i == -1 { + return string(line), nil + } + + return string(line[:i]), bytes.TrimSpace(line[i:]) +} + +func parseLine(line []byte) (marker, host string, key ssh.PublicKey, err error) { + if w, next := nextWord(line); w == markerCert || w == markerRevoked { + marker = w + line = next + } + + host, line = nextWord(line) + if len(line) == 0 { + return "", "", nil, errors.New("knownhosts: missing host pattern") + } + + // ignore the keytype as it's in the key blob anyway. + _, line = nextWord(line) + if len(line) == 0 { + return "", "", nil, errors.New("knownhosts: missing key type pattern") + } + + keyBlob, _ := nextWord(line) + + keyBytes, err := base64.StdEncoding.DecodeString(keyBlob) + if err != nil { + return "", "", nil, err + } + key, err = ssh.ParsePublicKey(keyBytes) + if err != nil { + return "", "", nil, err + } + + return marker, host, key, nil +} + +func (db *hostKeyDB) parseLine(line []byte, filename string, linenum int) error { + marker, pattern, key, err := parseLine(line) + if err != nil { + return err + } + + if marker == markerRevoked { + db.revoked[string(key.Marshal())] = &KnownKey{ + Key: key, + Filename: filename, + Line: linenum, + } + + return nil + } + + entry := keyDBLine{ + cert: marker == markerCert, + knownKey: KnownKey{ + Filename: filename, + Line: linenum, + Key: key, + }, + } + + if pattern[0] == '|' { + entry.matcher, err = newHashedHost(pattern) + } else { + entry.matcher, err = newHostnameMatcher(pattern) + } + + if err != nil { + return err + } + + db.lines = append(db.lines, entry) + return nil +} + +func newHostnameMatcher(pattern string) (matcher, error) { + var hps hostPatterns + for _, p := range strings.Split(pattern, ",") { + if len(p) == 0 { + continue + } + + var a addr + var negate bool + if p[0] == '!' 
{ + negate = true + p = p[1:] + } + + if len(p) == 0 { + return nil, errors.New("knownhosts: negation without following hostname") + } + + var err error + if p[0] == '[' { + a.host, a.port, err = net.SplitHostPort(p) + if err != nil { + return nil, err + } + } else { + a.host, a.port, err = net.SplitHostPort(p) + if err != nil { + a.host = p + a.port = "22" + } + } + hps = append(hps, hostPattern{ + negate: negate, + addr: a, + }) + } + return hps, nil +} + +// KnownKey represents a key declared in a known_hosts file. +type KnownKey struct { + Key ssh.PublicKey + Filename string + Line int +} + +func (k *KnownKey) String() string { + return fmt.Sprintf("%s:%d: %s", k.Filename, k.Line, serialize(k.Key)) +} + +// KeyError is returned if we did not find the key in the host key +// database, or there was a mismatch. Typically, in batch +// applications, this should be interpreted as failure. Interactive +// applications can offer an interactive prompt to the user. +type KeyError struct { + // Want holds the accepted host keys. For each key algorithm, + // there can be one hostkey. If Want is empty, the host is + // unknown. If Want is non-empty, there was a mismatch, which + // can signify a MITM attack. + Want []KnownKey +} + +func (u *KeyError) Error() string { + if len(u.Want) == 0 { + return "knownhosts: key is unknown" + } + return "knownhosts: key mismatch" +} + +// RevokedError is returned if we found a key that was revoked. +type RevokedError struct { + Revoked KnownKey +} + +func (r *RevokedError) Error() string { + return "knownhosts: key is revoked" +} + +// check checks a key against the host database. This should not be +// used for verifying certificates. +func (db *hostKeyDB) check(address string, remote net.Addr, remoteKey ssh.PublicKey) error { + if revoked := db.revoked[string(remoteKey.Marshal())]; revoked != nil { + return &RevokedError{Revoked: *revoked} + } + + host, port, err := net.SplitHostPort(remote.String()) + if err != nil { + return fmt.Errorf("knownhosts: SplitHostPort(%s): %v", remote, err) + } + + hostToCheck := addr{host, port} + if address != "" { + // Give preference to the hostname if available. + host, port, err := net.SplitHostPort(address) + if err != nil { + return fmt.Errorf("knownhosts: SplitHostPort(%s): %v", address, err) + } + + hostToCheck = addr{host, port} + } + + return db.checkAddr(hostToCheck, remoteKey) +} + +// checkAddr checks if we can find the given public key for the +// given address. If we only find an entry for the IP address, +// or only the hostname, then this still succeeds. +func (db *hostKeyDB) checkAddr(a addr, remoteKey ssh.PublicKey) error { + // TODO(hanwen): are these the right semantics? What if there + // is just a key for the IP address, but not for the + // hostname? + + // Algorithm => key. + knownKeys := map[string]KnownKey{} + for _, l := range db.lines { + if l.match(a) { + typ := l.knownKey.Key.Type() + if _, ok := knownKeys[typ]; !ok { + knownKeys[typ] = l.knownKey + } + } + } + + keyErr := &KeyError{} + for _, v := range knownKeys { + keyErr.Want = append(keyErr.Want, v) + } + + // Unknown remote host. + if len(knownKeys) == 0 { + return keyErr + } + + // If the remote host starts using a different, unknown key type, we + // also interpret that as a mismatch. + if known, ok := knownKeys[remoteKey.Type()]; !ok || !keyEq(known.Key, remoteKey) { + return keyErr + } + + return nil +} + +// The Read function parses file contents. 
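+//
+// As a rough illustration of the entries parseLine accepts (see the
+// sshd manpage linked above), a known_hosts file can mix plain,
+// bracketed, negated, marker-prefixed and hashed lines; the key blobs
+// are abbreviated here:
+//
+//	# comments and blank lines are skipped
+//	example.com ssh-ed25519 AAAA...
+//	[example.com]:2222,10.0.0.1 ecdsa-sha2-nistp256 AAAA...
+//	*.example.org,!untrusted.example.org ssh-rsa AAAA...
+//	@cert-authority *.example.net ssh-rsa AAAA...
+//	@revoked example.com ssh-rsa AAAA...
+//	|1|salt-base64|hash-base64 ssh-ed25519 AAAA...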
+func (db *hostKeyDB) Read(r io.Reader, filename string) error { + scanner := bufio.NewScanner(r) + + lineNum := 0 + for scanner.Scan() { + lineNum++ + line := scanner.Bytes() + line = bytes.TrimSpace(line) + if len(line) == 0 || line[0] == '#' { + continue + } + + if err := db.parseLine(line, filename, lineNum); err != nil { + return fmt.Errorf("knownhosts: %s:%d: %v", filename, lineNum, err) + } + } + return scanner.Err() +} + +// New creates a host key callback from the given OpenSSH host key +// files. The returned callback is for use in +// ssh.ClientConfig.HostKeyCallback. By preference, the key check +// operates on the hostname if available, i.e. if a server changes its +// IP address, the host key check will still succeed, even though a +// record of the new IP address is not available. +func New(files ...string) (ssh.HostKeyCallback, error) { + db := newHostKeyDB() + for _, fn := range files { + f, err := os.Open(fn) + if err != nil { + return nil, err + } + defer f.Close() + if err := db.Read(f, fn); err != nil { + return nil, err + } + } + + var certChecker ssh.CertChecker + certChecker.IsHostAuthority = db.IsHostAuthority + certChecker.IsRevoked = db.IsRevoked + certChecker.HostKeyFallback = db.check + + return certChecker.CheckHostKey, nil +} + +// Normalize normalizes an address into the form used in known_hosts. +func Normalize(address string) string { + host, port, err := net.SplitHostPort(address) + if err != nil { + host = address + port = "22" + } + entry := host + if port != "22" { + entry = "[" + entry + "]:" + port + } else if strings.Contains(host, ":") && !strings.HasPrefix(host, "[") { + entry = "[" + entry + "]" + } + return entry +} + +// Line returns a line to append to the known_hosts files. +func Line(addresses []string, key ssh.PublicKey) string { + var trimmed []string + for _, a := range addresses { + trimmed = append(trimmed, Normalize(a)) + } + + return strings.Join(trimmed, ",") + " " + serialize(key) +} + +// HashHostname hashes the given hostname. The hostname is not +// normalized before hashing. +func HashHostname(hostname string) string { + // TODO(hanwen): check if we can safely normalize this always.
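+ // As a hedged sketch of the result: with a random 20-byte salt,
+ // encodeHash below yields a string of the form
+ //
+ //	|1|base64(salt)|base64(HMAC-SHA1(salt, hostname))
+ //
+ // which is the form newHashedHost parses back out of a known_hosts line.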
+ salt := make([]byte, sha1.Size) + + _, err := rand.Read(salt) + if err != nil { + panic(fmt.Sprintf("crypto/rand failure %v", err)) + } + + hash := hashHost(hostname, salt) + return encodeHash(sha1HashType, salt, hash) +} + +func decodeHash(encoded string) (hashType string, salt, hash []byte, err error) { + if len(encoded) == 0 || encoded[0] != '|' { + err = errors.New("knownhosts: hashed host must start with '|'") + return + } + components := strings.Split(encoded, "|") + if len(components) != 4 { + err = fmt.Errorf("knownhosts: got %d components, want 4", len(components)) + return + } + + hashType = components[1] + if salt, err = base64.StdEncoding.DecodeString(components[2]); err != nil { + return + } + if hash, err = base64.StdEncoding.DecodeString(components[3]); err != nil { + return + } + return +} + +func encodeHash(typ string, salt []byte, hash []byte) string { + return strings.Join([]string{"", + typ, + base64.StdEncoding.EncodeToString(salt), + base64.StdEncoding.EncodeToString(hash), + }, "|") +} + +// See https://android.googlesource.com/platform/external/openssh/+/ab28f5495c85297e7a597c1ba62e996416da7c7e/hostfile.c#120 +func hashHost(hostname string, salt []byte) []byte { + mac := hmac.New(sha1.New, salt) + mac.Write([]byte(hostname)) + return mac.Sum(nil) +} + +type hashedHost struct { + salt []byte + hash []byte +} + +const sha1HashType = "1" + +func newHashedHost(encoded string) (*hashedHost, error) { + typ, salt, hash, err := decodeHash(encoded) + if err != nil { + return nil, err + } + + // The type field seems intended for future algorithm agility, but it's + // actually hardcoded in openssh currently, see + // https://android.googlesource.com/platform/external/openssh/+/ab28f5495c85297e7a597c1ba62e996416da7c7e/hostfile.c#120 + if typ != sha1HashType { + return nil, fmt.Errorf("knownhosts: got hash type %s, must be '1'", typ) + } + + return &hashedHost{salt: salt, hash: hash}, nil +} + +func (h *hashedHost) match(a addr) bool { + return bytes.Equal(hashHost(Normalize(a.String()), h.salt), h.hash) +} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go new file mode 100644 index 000000000..06a1b2750 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mac.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Message authentication support + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "hash" +) + +type macMode struct { + keySize int + etm bool + new func(key []byte) hash.Hash +} + +// truncatingMAC wraps around a hash.Hash and truncates the output digest to +// a given size.
+type truncatingMAC struct { + length int + hmac hash.Hash +} + +func (t truncatingMAC) Write(data []byte) (int, error) { + return t.hmac.Write(data) +} + +func (t truncatingMAC) Sum(in []byte) []byte { + out := t.hmac.Sum(in) + return out[:len(in)+t.length] +} + +func (t truncatingMAC) Reset() { + t.hmac.Reset() +} + +func (t truncatingMAC) Size() int { + return t.length +} + +func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } + +var macModes = map[string]*macMode{ + "hmac-sha2-512-etm@openssh.com": {64, true, func(key []byte) hash.Hash { + return hmac.New(sha512.New, key) + }}, + "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, + "hmac-sha2-512": {64, false, func(key []byte) hash.Hash { + return hmac.New(sha512.New, key) + }}, + "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, + "hmac-sha1": {20, false, func(key []byte) hash.Hash { + return hmac.New(sha1.New, key) + }}, + "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { + return truncatingMAC{12, hmac.New(sha1.New, key)} + }}, +} diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go new file mode 100644 index 000000000..b55f86056 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -0,0 +1,891 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + "reflect" + "strconv" + "strings" +) + +// These are SSH message type numbers. They are scattered around several +// documents but many were taken from [SSH-PARAMETERS]. +const ( + msgIgnore = 2 + msgUnimplemented = 3 + msgDebug = 4 + msgNewKeys = 21 +) + +// SSH messages: +// +// These structures mirror the wire format of the corresponding SSH messages. +// They are marshaled using reflection with the marshal and unmarshal functions +// in this file. The only wrinkle is that a final member of type []byte with a +// ssh tag of "rest" receives the remainder of a packet when unmarshaling. + +// See RFC 4253, section 11.1. +const msgDisconnect = 1 + +// disconnectMsg is the message that signals a disconnect. It is also +// the error type returned from mux.Wait() +type disconnectMsg struct { + Reason uint32 `sshtype:"1"` + Message string + Language string +} + +func (d *disconnectMsg) Error() string { + return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) +} + +// See RFC 4253, section 7.1. +const msgKexInit = 20 + +type kexInitMsg struct { + Cookie [16]byte `sshtype:"20"` + KexAlgos []string + ServerHostKeyAlgos []string + CiphersClientServer []string + CiphersServerClient []string + MACsClientServer []string + MACsServerClient []string + CompressionClientServer []string + CompressionServerClient []string + LanguagesClientServer []string + LanguagesServerClient []string + FirstKexFollows bool + Reserved uint32 +} + +// See RFC 4253, section 8. 
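+//
+// As a hedged sketch of the wire encoding these structs describe:
+// marshaling kexDHInitMsg{X: x} below emits the type byte 30 from its
+// `sshtype:"30"` tag followed by X as a length-prefixed mpint, e.g.
+// for X = 0x7f:
+//
+//	1e 00 00 00 01 7f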
+ +// Diffie-Hellman +const msgKexDHInit = 30 + +type kexDHInitMsg struct { + X *big.Int `sshtype:"30"` +} + +const msgKexECDHInit = 30 + +type kexECDHInitMsg struct { + ClientPubKey []byte `sshtype:"30"` +} + +const msgKexECDHReply = 31 + +type kexECDHReplyMsg struct { + HostKey []byte `sshtype:"31"` + EphemeralPubKey []byte + Signature []byte +} + +const msgKexDHReply = 31 + +type kexDHReplyMsg struct { + HostKey []byte `sshtype:"31"` + Y *big.Int + Signature []byte +} + +// See RFC 4419, section 5. +const msgKexDHGexGroup = 31 + +type kexDHGexGroupMsg struct { + P *big.Int `sshtype:"31"` + G *big.Int +} + +const msgKexDHGexInit = 32 + +type kexDHGexInitMsg struct { + X *big.Int `sshtype:"32"` +} + +const msgKexDHGexReply = 33 + +type kexDHGexReplyMsg struct { + HostKey []byte `sshtype:"33"` + Y *big.Int + Signature []byte +} + +const msgKexDHGexRequest = 34 + +type kexDHGexRequestMsg struct { + MinBits uint32 `sshtype:"34"` + PreferedBits uint32 + MaxBits uint32 +} + +// See RFC 4253, section 10. +const msgServiceRequest = 5 + +type serviceRequestMsg struct { + Service string `sshtype:"5"` +} + +// See RFC 4253, section 10. +const msgServiceAccept = 6 + +type serviceAcceptMsg struct { + Service string `sshtype:"6"` +} + +// See RFC 8308, section 2.3 +const msgExtInfo = 7 + +type extInfoMsg struct { + NumExtensions uint32 `sshtype:"7"` + Payload []byte `ssh:"rest"` +} + +// See RFC 4252, section 5. +const msgUserAuthRequest = 50 + +type userAuthRequestMsg struct { + User string `sshtype:"50"` + Service string + Method string + Payload []byte `ssh:"rest"` +} + +// Used for debug printouts of packets. +type userAuthSuccessMsg struct { +} + +// See RFC 4252, section 5.1 +const msgUserAuthFailure = 51 + +type userAuthFailureMsg struct { + Methods []string `sshtype:"51"` + PartialSuccess bool +} + +// See RFC 4252, section 5.1 +const msgUserAuthSuccess = 52 + +// See RFC 4252, section 5.4 +const msgUserAuthBanner = 53 + +type userAuthBannerMsg struct { + Message string `sshtype:"53"` + // unused, but required to allow message parsing + Language string +} + +// See RFC 4256, section 3.2 +const msgUserAuthInfoRequest = 60 +const msgUserAuthInfoResponse = 61 + +type userAuthInfoRequestMsg struct { + Name string `sshtype:"60"` + Instruction string + Language string + NumPrompts uint32 + Prompts []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpen = 90 + +type channelOpenMsg struct { + ChanType string `sshtype:"90"` + PeersID uint32 + PeersWindow uint32 + MaxPacketSize uint32 + TypeSpecificData []byte `ssh:"rest"` +} + +const msgChannelExtendedData = 95 +const msgChannelData = 94 + +// Used for debug print outs of packets. +type channelDataMsg struct { + PeersID uint32 `sshtype:"94"` + Length uint32 + Rest []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpenConfirm = 91 + +type channelOpenConfirmMsg struct { + PeersID uint32 `sshtype:"91"` + MyID uint32 + MyWindow uint32 + MaxPacketSize uint32 + TypeSpecificData []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpenFailure = 92 + +type channelOpenFailureMsg struct { + PeersID uint32 `sshtype:"92"` + Reason RejectionReason + Message string + Language string +} + +const msgChannelRequest = 98 + +type channelRequestMsg struct { + PeersID uint32 `sshtype:"98"` + Request string + WantReply bool + RequestSpecificData []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.4. 
+const msgChannelSuccess = 99 + +type channelRequestSuccessMsg struct { + PeersID uint32 `sshtype:"99"` +} + +// See RFC 4254, section 5.4. +const msgChannelFailure = 100 + +type channelRequestFailureMsg struct { + PeersID uint32 `sshtype:"100"` +} + +// See RFC 4254, section 5.3 +const msgChannelClose = 97 + +type channelCloseMsg struct { + PeersID uint32 `sshtype:"97"` +} + +// See RFC 4254, section 5.3 +const msgChannelEOF = 96 + +type channelEOFMsg struct { + PeersID uint32 `sshtype:"96"` +} + +// See RFC 4254, section 4 +const msgGlobalRequest = 80 + +type globalRequestMsg struct { + Type string `sshtype:"80"` + WantReply bool + Data []byte `ssh:"rest"` +} + +// See RFC 4254, section 4 +const msgRequestSuccess = 81 + +type globalRequestSuccessMsg struct { + Data []byte `ssh:"rest" sshtype:"81"` +} + +// See RFC 4254, section 4 +const msgRequestFailure = 82 + +type globalRequestFailureMsg struct { + Data []byte `ssh:"rest" sshtype:"82"` +} + +// See RFC 4254, section 5.2 +const msgChannelWindowAdjust = 93 + +type windowAdjustMsg struct { + PeersID uint32 `sshtype:"93"` + AdditionalBytes uint32 +} + +// See RFC 4252, section 7 +const msgUserAuthPubKeyOk = 60 + +type userAuthPubKeyOkMsg struct { + Algo string `sshtype:"60"` + PubKey []byte +} + +// See RFC 4462, section 3 +const msgUserAuthGSSAPIResponse = 60 + +type userAuthGSSAPIResponse struct { + SupportMech []byte `sshtype:"60"` +} + +const msgUserAuthGSSAPIToken = 61 + +type userAuthGSSAPIToken struct { + Token []byte `sshtype:"61"` +} + +const msgUserAuthGSSAPIMIC = 66 + +type userAuthGSSAPIMIC struct { + MIC []byte `sshtype:"66"` +} + +// See RFC 4462, section 3.9 +const msgUserAuthGSSAPIErrTok = 64 + +type userAuthGSSAPIErrTok struct { + ErrorToken []byte `sshtype:"64"` +} + +// See RFC 4462, section 3.8 +const msgUserAuthGSSAPIError = 65 + +type userAuthGSSAPIError struct { + MajorStatus uint32 `sshtype:"65"` + MinorStatus uint32 + Message string + LanguageTag string +} + +// Transport layer OpenSSH extension. See [PROTOCOL], section 1.9 +const msgPing = 192 + +type pingMsg struct { + Data string `sshtype:"192"` +} + +// Transport layer OpenSSH extension. See [PROTOCOL], section 1.9 +const msgPong = 193 + +type pongMsg struct { + Data string `sshtype:"193"` +} + +// typeTags returns the possible type bytes for the given reflect.Type, which +// should be a struct. The possible values are separated by a '|' character. +func typeTags(structType reflect.Type) (tags []byte) { + tagStr := structType.Field(0).Tag.Get("sshtype") + + for _, tag := range strings.Split(tagStr, "|") { + i, err := strconv.Atoi(tag) + if err == nil { + tags = append(tags, byte(i)) + } + } + + return tags +} + +func fieldError(t reflect.Type, field int, problem string) error { + if problem != "" { + problem = ": " + problem + } + return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) +} + +var errShortRead = errors.New("ssh: short read") + +// Unmarshal parses data in SSH wire format into a structure. The out +// argument should be a pointer to struct. If the first member of the +// struct has the "sshtype" tag set to a '|'-separated set of numbers +// in decimal, the packet must start with one of those numbers. In +// case of error, Unmarshal returns a ParseError or +// UnexpectedMessageError. 
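+//
+// A minimal usage sketch, mirroring how decode further below calls it:
+//
+//	var req serviceRequestMsg
+//	if err := Unmarshal(packet, &req); err != nil {
+//		// packet was not a well-formed msgServiceRequest
+//	}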
+func Unmarshal(data []byte, out interface{}) error { + v := reflect.ValueOf(out).Elem() + structType := v.Type() + expectedTypes := typeTags(structType) + + var expectedType byte + if len(expectedTypes) > 0 { + expectedType = expectedTypes[0] + } + + if len(data) == 0 { + return parseError(expectedType) + } + + if len(expectedTypes) > 0 { + goodType := false + for _, e := range expectedTypes { + if e > 0 && data[0] == e { + goodType = true + break + } + } + if !goodType { + return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) + } + data = data[1:] + } + + var ok bool + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + t := field.Type() + switch t.Kind() { + case reflect.Bool: + if len(data) < 1 { + return errShortRead + } + field.SetBool(data[0] != 0) + data = data[1:] + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 { + return fieldError(structType, i, "array of unsupported type") + } + if len(data) < t.Len() { + return errShortRead + } + for j, n := 0, t.Len(); j < n; j++ { + field.Index(j).Set(reflect.ValueOf(data[j])) + } + data = data[t.Len():] + case reflect.Uint64: + var u64 uint64 + if u64, data, ok = parseUint64(data); !ok { + return errShortRead + } + field.SetUint(u64) + case reflect.Uint32: + var u32 uint32 + if u32, data, ok = parseUint32(data); !ok { + return errShortRead + } + field.SetUint(uint64(u32)) + case reflect.Uint8: + if len(data) < 1 { + return errShortRead + } + field.SetUint(uint64(data[0])) + data = data[1:] + case reflect.String: + var s []byte + if s, data, ok = parseString(data); !ok { + return fieldError(structType, i, "") + } + field.SetString(string(s)) + case reflect.Slice: + switch t.Elem().Kind() { + case reflect.Uint8: + if structType.Field(i).Tag.Get("ssh") == "rest" { + field.Set(reflect.ValueOf(data)) + data = nil + } else { + var s []byte + if s, data, ok = parseString(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(s)) + } + case reflect.String: + var nl []string + if nl, data, ok = parseNameList(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(nl)) + default: + return fieldError(structType, i, "slice of unsupported type") + } + case reflect.Ptr: + if t == bigIntType { + var n *big.Int + if n, data, ok = parseInt(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(n)) + } else { + return fieldError(structType, i, "pointer to unsupported type") + } + default: + return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) + } + } + + if len(data) != 0 { + return parseError(expectedType) + } + + return nil +} + +// Marshal serializes the message in msg to SSH wire format. The msg +// argument should be a struct or pointer to struct. If the first +// member has the "sshtype" tag set to a number in decimal, that +// number is prepended to the result. If the last member has the +// "ssh" tag set to "rest", its contents are appended to the output.
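+//
+// As a hedged example of these rules:
+//
+//	Marshal(&serviceRequestMsg{Service: "ssh-userauth"})
+//
+// yields the type byte from the `sshtype:"5"` tag followed by one
+// length-prefixed string:
+//
+//	05 00 00 00 0c 73 73 68 2d 75 73 65 72 61 75 74 68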
+func Marshal(msg interface{}) []byte { + out := make([]byte, 0, 64) + return marshalStruct(out, msg) +} + +func marshalStruct(out []byte, msg interface{}) []byte { + v := reflect.Indirect(reflect.ValueOf(msg)) + msgTypes := typeTags(v.Type()) + if len(msgTypes) > 0 { + out = append(out, msgTypes[0]) + } + + for i, n := 0, v.NumField(); i < n; i++ { + field := v.Field(i) + switch t := field.Type(); t.Kind() { + case reflect.Bool: + var v uint8 + if field.Bool() { + v = 1 + } + out = append(out, v) + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 { + panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) + } + for j, l := 0, t.Len(); j < l; j++ { + out = append(out, uint8(field.Index(j).Uint())) + } + case reflect.Uint32: + out = appendU32(out, uint32(field.Uint())) + case reflect.Uint64: + out = appendU64(out, uint64(field.Uint())) + case reflect.Uint8: + out = append(out, uint8(field.Uint())) + case reflect.String: + s := field.String() + out = appendInt(out, len(s)) + out = append(out, s...) + case reflect.Slice: + switch t.Elem().Kind() { + case reflect.Uint8: + if v.Type().Field(i).Tag.Get("ssh") != "rest" { + out = appendInt(out, field.Len()) + } + out = append(out, field.Bytes()...) + case reflect.String: + offset := len(out) + out = appendU32(out, 0) + if n := field.Len(); n > 0 { + for j := 0; j < n; j++ { + f := field.Index(j) + if j != 0 { + out = append(out, ',') + } + out = append(out, f.String()...) + } + // overwrite length value + binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) + } + default: + panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) + } + case reflect.Ptr: + if t == bigIntType { + var n *big.Int + nValue := reflect.ValueOf(&n) + nValue.Elem().Set(field) + needed := intLength(n) + oldLength := len(out) + + if cap(out)-len(out) < needed { + newOut := make([]byte, len(out), 2*(len(out)+needed)) + copy(newOut, out) + out = newOut + } + out = out[:oldLength+needed] + marshalInt(out[oldLength:], n) + } else { + panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) + } + } + } + + return out +} + +var bigOne = big.NewInt(1) + +func parseString(in []byte) (out, rest []byte, ok bool) { + if len(in) < 4 { + return + } + length := binary.BigEndian.Uint32(in) + in = in[4:] + if uint32(len(in)) < length { + return + } + out = in[:length] + rest = in[length:] + ok = true + return +} + +var ( + comma = []byte{','} + emptyNameList = []string{} +) + +func parseNameList(in []byte) (out []string, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + if len(contents) == 0 { + out = emptyNameList + return + } + parts := bytes.Split(contents, comma) + out = make([]string, len(parts)) + for i, part := range parts { + out[i] = string(part) + } + return +} + +func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + out = new(big.Int) + + if len(contents) > 0 && contents[0]&0x80 == 0x80 { + // This is a negative number + notBytes := make([]byte, len(contents)) + for i := range notBytes { + notBytes[i] = ^contents[i] + } + out.SetBytes(notBytes) + out.Add(out, bigOne) + out.Neg(out) + } else { + // Positive number + out.SetBytes(contents) + } + ok = true + return +} + +func parseUint32(in []byte) (uint32, []byte, bool) { + if len(in) < 4 { + return 0, nil, false + } + return binary.BigEndian.Uint32(in), in[4:], true +} + +func parseUint64(in []byte) (uint64, []byte, 
bool) { + if len(in) < 8 { + return 0, nil, false + } + return binary.BigEndian.Uint64(in), in[8:], true +} + +func intLength(n *big.Int) int { + length := 4 /* length bytes */ + if n.Sign() < 0 { + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bitLen := nMinus1.BitLen() + if bitLen%8 == 0 { + // The number will need 0xff padding + length++ + } + length += (bitLen + 7) / 8 + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bitLen := n.BitLen() + if bitLen%8 == 0 { + // The number will need 0x00 padding + length++ + } + length += (bitLen + 7) / 8 + } + + return length +} + +func marshalUint32(to []byte, n uint32) []byte { + binary.BigEndian.PutUint32(to, n) + return to[4:] +} + +func marshalUint64(to []byte, n uint64) []byte { + binary.BigEndian.PutUint64(to, n) + return to[8:] +} + +func marshalInt(to []byte, n *big.Int) []byte { + lengthBytes := to + to = to[4:] + length := 0 + + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement + // form. So we'll subtract 1 and invert. If the + // most-significant-bit isn't set then we'll need to pad the + // beginning with 0xff in order to keep the number negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + to[0] = 0xff + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bytes := n.Bytes() + if len(bytes) > 0 && bytes[0]&0x80 != 0 { + // We'll have to pad this with a 0x00 in order to + // stop it looking like a negative number. + to[0] = 0 + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } + + lengthBytes[0] = byte(length >> 24) + lengthBytes[1] = byte(length >> 16) + lengthBytes[2] = byte(length >> 8) + lengthBytes[3] = byte(length) + return to +} + +func writeInt(w io.Writer, n *big.Int) { + length := intLength(n) + buf := make([]byte, length) + marshalInt(buf, n) + w.Write(buf) +} + +func writeString(w io.Writer, s []byte) { + var lengthBytes [4]byte + lengthBytes[0] = byte(len(s) >> 24) + lengthBytes[1] = byte(len(s) >> 16) + lengthBytes[2] = byte(len(s) >> 8) + lengthBytes[3] = byte(len(s)) + w.Write(lengthBytes[:]) + w.Write(s) +} + +func stringLength(n int) int { + return 4 + n +} + +func marshalString(to []byte, s []byte) []byte { + to[0] = byte(len(s) >> 24) + to[1] = byte(len(s) >> 16) + to[2] = byte(len(s) >> 8) + to[3] = byte(len(s)) + to = to[4:] + copy(to, s) + return to[len(s):] +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)) + +// Decode a packet into its corresponding message. 
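+//
+// decode is effectively the inverse of Marshal for the messages known
+// to this file, dispatching on the leading type byte. Note that the
+// kex message numbers 30 and 31 are reused across key-exchange
+// methods; decode maps them to the finite-field DH forms. A hedged
+// round-trip sketch:
+//
+//	msg, err := decode(Marshal(&kexDHInitMsg{X: big.NewInt(42)}))
+//	// on success, msg is a *kexDHInitMsg whose X equals 42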
+func decode(packet []byte) (interface{}, error) { + var msg interface{} + switch packet[0] { + case msgDisconnect: + msg = new(disconnectMsg) + case msgServiceRequest: + msg = new(serviceRequestMsg) + case msgServiceAccept: + msg = new(serviceAcceptMsg) + case msgExtInfo: + msg = new(extInfoMsg) + case msgKexInit: + msg = new(kexInitMsg) + case msgKexDHInit: + msg = new(kexDHInitMsg) + case msgKexDHReply: + msg = new(kexDHReplyMsg) + case msgUserAuthRequest: + msg = new(userAuthRequestMsg) + case msgUserAuthSuccess: + return new(userAuthSuccessMsg), nil + case msgUserAuthFailure: + msg = new(userAuthFailureMsg) + case msgUserAuthPubKeyOk: + msg = new(userAuthPubKeyOkMsg) + case msgGlobalRequest: + msg = new(globalRequestMsg) + case msgRequestSuccess: + msg = new(globalRequestSuccessMsg) + case msgRequestFailure: + msg = new(globalRequestFailureMsg) + case msgChannelOpen: + msg = new(channelOpenMsg) + case msgChannelData: + msg = new(channelDataMsg) + case msgChannelOpenConfirm: + msg = new(channelOpenConfirmMsg) + case msgChannelOpenFailure: + msg = new(channelOpenFailureMsg) + case msgChannelWindowAdjust: + msg = new(windowAdjustMsg) + case msgChannelEOF: + msg = new(channelEOFMsg) + case msgChannelClose: + msg = new(channelCloseMsg) + case msgChannelRequest: + msg = new(channelRequestMsg) + case msgChannelSuccess: + msg = new(channelRequestSuccessMsg) + case msgChannelFailure: + msg = new(channelRequestFailureMsg) + case msgUserAuthGSSAPIToken: + msg = new(userAuthGSSAPIToken) + case msgUserAuthGSSAPIMIC: + msg = new(userAuthGSSAPIMIC) + case msgUserAuthGSSAPIErrTok: + msg = new(userAuthGSSAPIErrTok) + case msgUserAuthGSSAPIError: + msg = new(userAuthGSSAPIError) + default: + return nil, unexpectedMessageError(0, packet[0]) + } + if err := Unmarshal(packet, msg); err != nil { + return nil, err + } + return msg, nil +} + +var packetTypeNames = map[byte]string{ + msgDisconnect: "disconnectMsg", + msgServiceRequest: "serviceRequestMsg", + msgServiceAccept: "serviceAcceptMsg", + msgExtInfo: "extInfoMsg", + msgKexInit: "kexInitMsg", + msgKexDHInit: "kexDHInitMsg", + msgKexDHReply: "kexDHReplyMsg", + msgUserAuthRequest: "userAuthRequestMsg", + msgUserAuthSuccess: "userAuthSuccessMsg", + msgUserAuthFailure: "userAuthFailureMsg", + msgUserAuthPubKeyOk: "userAuthPubKeyOkMsg", + msgGlobalRequest: "globalRequestMsg", + msgRequestSuccess: "globalRequestSuccessMsg", + msgRequestFailure: "globalRequestFailureMsg", + msgChannelOpen: "channelOpenMsg", + msgChannelData: "channelDataMsg", + msgChannelOpenConfirm: "channelOpenConfirmMsg", + msgChannelOpenFailure: "channelOpenFailureMsg", + msgChannelWindowAdjust: "windowAdjustMsg", + msgChannelEOF: "channelEOFMsg", + msgChannelClose: "channelCloseMsg", + msgChannelRequest: "channelRequestMsg", + msgChannelSuccess: "channelRequestSuccessMsg", + msgChannelFailure: "channelRequestFailureMsg", +} diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go new file mode 100644 index 000000000..d2d24c635 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mux.go @@ -0,0 +1,357 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/binary" + "fmt" + "io" + "log" + "sync" + "sync/atomic" +) + +// debugMux, if set, causes messages in the connection protocol to be +// logged. +const debugMux = false + +// chanList is a thread safe channel list. 
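+//
+// A local channel ID is simply the channel's index in chans plus
+// offset: add stores a channel in the first free slot (or appends) and
+// returns that position, and getChan reverses the computation, so
+// lookups for incoming packets are constant time.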
+type chanList struct { + // protects concurrent access to chans + sync.Mutex + + // chans are indexed by the local id of the channel, which the + // other side should send in the PeersId field. + chans []*channel + + // This is a debugging aid: it offsets all IDs by this + // amount. This helps distinguish otherwise identical + // server/client muxes + offset uint32 +} + +// Assigns a channel ID to the given channel. +func (c *chanList) add(ch *channel) uint32 { + c.Lock() + defer c.Unlock() + for i := range c.chans { + if c.chans[i] == nil { + c.chans[i] = ch + return uint32(i) + c.offset + } + } + c.chans = append(c.chans, ch) + return uint32(len(c.chans)-1) + c.offset +} + +// getChan returns the channel for the given ID. +func (c *chanList) getChan(id uint32) *channel { + id -= c.offset + + c.Lock() + defer c.Unlock() + if id < uint32(len(c.chans)) { + return c.chans[id] + } + return nil +} + +func (c *chanList) remove(id uint32) { + id -= c.offset + c.Lock() + if id < uint32(len(c.chans)) { + c.chans[id] = nil + } + c.Unlock() +} + +// dropAll forgets all channels it knows, returning them in a slice. +func (c *chanList) dropAll() []*channel { + c.Lock() + defer c.Unlock() + var r []*channel + + for _, ch := range c.chans { + if ch == nil { + continue + } + r = append(r, ch) + } + c.chans = nil + return r +} + +// mux represents the state for the SSH connection protocol, which +// multiplexes many channels onto a single packet transport. +type mux struct { + conn packetConn + chanList chanList + + incomingChannels chan NewChannel + + globalSentMu sync.Mutex + globalResponses chan interface{} + incomingRequests chan *Request + + errCond *sync.Cond + err error +} + +// When debugging, each new chanList instantiation has a different +// offset. +var globalOff uint32 + +func (m *mux) Wait() error { + m.errCond.L.Lock() + defer m.errCond.L.Unlock() + for m.err == nil { + m.errCond.Wait() + } + return m.err +} + +// newMux returns a mux that runs over the given connection. +func newMux(p packetConn) *mux { + m := &mux{ + conn: p, + incomingChannels: make(chan NewChannel, chanSize), + globalResponses: make(chan interface{}, 1), + incomingRequests: make(chan *Request, chanSize), + errCond: newCond(), + } + if debugMux { + m.chanList.offset = atomic.AddUint32(&globalOff, 1) + } + + go m.loop() + return m +} + +func (m *mux) sendMessage(msg interface{}) error { + p := Marshal(msg) + if debugMux { + log.Printf("send global(%d): %#v", m.chanList.offset, msg) + } + return m.conn.writePacket(p) +} + +func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { + if wantReply { + m.globalSentMu.Lock() + defer m.globalSentMu.Unlock() + } + + if err := m.sendMessage(globalRequestMsg{ + Type: name, + WantReply: wantReply, + Data: payload, + }); err != nil { + return false, nil, err + } + + if !wantReply { + return false, nil, nil + } + + msg, ok := <-m.globalResponses + if !ok { + return false, nil, io.EOF + } + switch msg := msg.(type) { + case *globalRequestFailureMsg: + return false, msg.Data, nil + case *globalRequestSuccessMsg: + return true, msg.Data, nil + default: + return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) + } +} + +// ackRequest must be called after processing a global request that +// has WantReply set. 
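+//
+// As a hedged sketch of the pairing: a peer that sent, say,
+//
+//	ok, payload, err := m.SendRequest("keepalive@openssh.com", true, nil)
+//
+// blocks reading m.globalResponses until the other side answers its
+// globalRequestMsg via ackRequest below.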
+func (m *mux) ackRequest(ok bool, data []byte) error { + if ok { + return m.sendMessage(globalRequestSuccessMsg{Data: data}) + } + return m.sendMessage(globalRequestFailureMsg{Data: data}) +} + +func (m *mux) Close() error { + return m.conn.Close() +} + +// loop runs the connection machine. It will process packets until an +// error is encountered. To synchronize on loop exit, use mux.Wait. +func (m *mux) loop() { + var err error + for err == nil { + err = m.onePacket() + } + + for _, ch := range m.chanList.dropAll() { + ch.close() + } + + close(m.incomingChannels) + close(m.incomingRequests) + close(m.globalResponses) + + m.conn.Close() + + m.errCond.L.Lock() + m.err = err + m.errCond.Broadcast() + m.errCond.L.Unlock() + + if debugMux { + log.Println("loop exit", err) + } +} + +// onePacket reads and processes one packet. +func (m *mux) onePacket() error { + packet, err := m.conn.readPacket() + if err != nil { + return err + } + + if debugMux { + if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { + log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) + } else { + p, _ := decode(packet) + log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) + } + } + + switch packet[0] { + case msgChannelOpen: + return m.handleChannelOpen(packet) + case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: + return m.handleGlobalPacket(packet) + case msgPing: + var msg pingMsg + if err := Unmarshal(packet, &msg); err != nil { + return fmt.Errorf("failed to unmarshal ping@openssh.com message: %w", err) + } + return m.sendMessage(pongMsg(msg)) + } + + // assume a channel packet. + if len(packet) < 5 { + return parseError(packet[0]) + } + id := binary.BigEndian.Uint32(packet[1:]) + ch := m.chanList.getChan(id) + if ch == nil { + return m.handleUnknownChannelPacket(id, packet) + } + + return ch.handlePacket(packet) +} + +func (m *mux) handleGlobalPacket(packet []byte) error { + msg, err := decode(packet) + if err != nil { + return err + } + + switch msg := msg.(type) { + case *globalRequestMsg: + m.incomingRequests <- &Request{ + Type: msg.Type, + WantReply: msg.WantReply, + Payload: msg.Data, + mux: m, + } + case *globalRequestSuccessMsg, *globalRequestFailureMsg: + m.globalResponses <- msg + default: + panic(fmt.Sprintf("not a global message %#v", msg)) + } + + return nil +} + +// handleChannelOpen schedules a channel to be Accept()ed. 
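+//
+// handleChannelOpen is the inbound half of the open handshake;
+// openChannel below is the outbound half. A hedged sketch of the
+// caller side:
+//
+//	ch, reqs, err := m.OpenChannel("session", nil)
+//	// on success the peer confirmed with a channelOpenConfirmMsg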
+func (m *mux) handleChannelOpen(packet []byte) error { + var msg channelOpenMsg + if err := Unmarshal(packet, &msg); err != nil { + return err + } + + if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { + failMsg := channelOpenFailureMsg{ + PeersID: msg.PeersID, + Reason: ConnectionFailed, + Message: "invalid request", + Language: "en_US.UTF-8", + } + return m.sendMessage(failMsg) + } + + c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) + c.remoteId = msg.PeersID + c.maxRemotePayload = msg.MaxPacketSize + c.remoteWin.add(msg.PeersWindow) + m.incomingChannels <- c + return nil +} + +func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { + ch, err := m.openChannel(chanType, extra) + if err != nil { + return nil, nil, err + } + + return ch, ch.incomingRequests, nil +} + +func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { + ch := m.newChannel(chanType, channelOutbound, extra) + + ch.maxIncomingPayload = channelMaxPacket + + open := channelOpenMsg{ + ChanType: chanType, + PeersWindow: ch.myWindow, + MaxPacketSize: ch.maxIncomingPayload, + TypeSpecificData: extra, + PeersID: ch.localId, + } + if err := m.sendMessage(open); err != nil { + return nil, err + } + + switch msg := (<-ch.msg).(type) { + case *channelOpenConfirmMsg: + return ch, nil + case *channelOpenFailureMsg: + return nil, &OpenChannelError{msg.Reason, msg.Message} + default: + return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) + } +} + +func (m *mux) handleUnknownChannelPacket(id uint32, packet []byte) error { + msg, err := decode(packet) + if err != nil { + return err + } + + switch msg := msg.(type) { + // RFC 4254 section 5.4 says unrecognized channel requests should + // receive a failure response. + case *channelRequestMsg: + if msg.WantReply { + return m.sendMessage(channelRequestFailureMsg{ + PeersID: msg.PeersID, + }) + } + return nil + default: + return fmt.Errorf("ssh: invalid channel %d", id) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go new file mode 100644 index 000000000..3ca9e89e2 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -0,0 +1,902 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "strings" +) + +// The Permissions type holds fine-grained permissions that are +// specific to a user or a specific authentication method for a user. +// The Permissions value for a successful authentication attempt is +// available in ServerConn, so it can be used to pass information from +// the user-authentication phase to the application layer. +type Permissions struct { + // CriticalOptions indicate restrictions to the default + // permissions, and are typically used in conjunction with + // user certificates. The standard for SSH certificates + // defines "force-command" (only allow the given command to + // execute) and "source-address" (only allow connections from + // the given address). The SSH package currently only enforces + // the "source-address" critical option. It is up to server + // implementations to enforce other critical options, such as + // "force-command", by checking them after the SSH handshake + // is successful. 
In general, SSH servers should reject + // connections that specify critical options that are unknown + // or not supported. + CriticalOptions map[string]string + + // Extensions are extra functionality that the server may + // offer on authenticated connections. Lack of support for an + // extension does not preclude authenticating a user. Common + // extensions are "permit-agent-forwarding", + // "permit-X11-forwarding". The Go SSH library currently does + // not act on any extension, and it is up to server + // implementations to honor them. Extensions can be used to + // pass data from the authentication callbacks to the server + // application layer. + Extensions map[string]string +} + +type GSSAPIWithMICConfig struct { + // AllowLogin must be set; it is called when gssapi-with-mic + // authentication is selected (RFC 4462 section 3). The srcName comes from the + // results of the GSS-API authentication. The format is username@DOMAIN. + // GSSAPI only guarantees to the server who the user is, not whether they can log in, or with what permissions. + // This callback is therefore called after the user identity is established with GSSAPI, to decide whether the + // user may log in and with which permissions. If the user is allowed to log in, it should return a nil error. + AllowLogin func(conn ConnMetadata, srcName string) (*Permissions, error) + + // Server must be set. It's the implementation + // of the GSSAPIServer interface. See GSSAPIServer interface for details. + Server GSSAPIServer +} + +// ServerConfig holds server-specific configuration data. +type ServerConfig struct { + // Config contains configuration shared between client and server. + Config + + // PublicKeyAuthAlgorithms specifies the supported client public key + // authentication algorithms. Note that this should not include certificate + // types since those use the underlying algorithm. This list is sent to the + // client if it supports the server-sig-algs extension. Order is irrelevant. + // If unspecified then a default set of algorithms is used. + PublicKeyAuthAlgorithms []string + + hostKeys []Signer + + // NoClientAuth is true if clients are allowed to connect without + // authenticating. + // To determine NoClientAuth at runtime, set NoClientAuth to true + // and the optional NoClientAuthCallback to a non-nil value. + NoClientAuth bool + + // NoClientAuthCallback, if non-nil, is called when a user + // attempts to authenticate with auth method "none". + // NoClientAuth must also be set to true for this to be used, or + // this func is unused. + NoClientAuthCallback func(ConnMetadata) (*Permissions, error) + + // MaxAuthTries specifies the maximum number of authentication attempts + // permitted per connection. If set to a negative number, the number of + // attempts is unlimited. If set to zero, the number of attempts is limited + // to 6. + MaxAuthTries int + + // PasswordCallback, if non-nil, is called when a user + // attempts to authenticate using a password. + PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) + + // PublicKeyCallback, if non-nil, is called when a client + // offers a public key for authentication. It must return a nil error + // if the given public key can be used to authenticate the + // given user. For example, see CertChecker.Authenticate. A + // call to this function does not guarantee that the key + // offered is in fact used to authenticate. To record any data + // depending on the public key, store it inside a + // Permissions.Extensions entry.
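+ //
+ // A hedged sketch of that pattern, where authorizedKey is a
+ // hypothetical lookup supplied by the application:
+ //
+ //	cfg.PublicKeyCallback = func(c ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
+ //		if !authorizedKey(c.User(), key) {
+ //			return nil, fmt.Errorf("ssh: unknown public key for %q", c.User())
+ //		}
+ //		return &ssh.Permissions{
+ //			Extensions: map[string]string{"pubkey-fp": ssh.FingerprintSHA256(key)},
+ //		}, nil
+ //	}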
+ PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // KeyboardInteractiveCallback, if non-nil, is called when + // keyboard-interactive authentication is selected (RFC + // 4256). The client object's Challenge function should be + // used to query the user. The callback may offer multiple + // Challenge rounds. To avoid information leaks, the client + // should be presented a challenge even if the user is + // unknown. + KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) + + // AuthLogCallback, if non-nil, is called to log all authentication + // attempts. + AuthLogCallback func(conn ConnMetadata, method string, err error) + + // ServerVersion is the version identification string to announce in + // the public handshake. + // If empty, a reasonable default is used. + // Note that RFC 4253 section 4.2 requires that this string start with + // "SSH-2.0-". + ServerVersion string + + // BannerCallback, if present, is called and the return string is sent to + // the client after key exchange completed but before authentication. + BannerCallback func(conn ConnMetadata) string + + // GSSAPIWithMICConfig includes the gssapi server and callback, which, + // if both are non-nil, are used when gssapi-with-mic authentication is + // selected (RFC 4462 section 3). + GSSAPIWithMICConfig *GSSAPIWithMICConfig +} + +// AddHostKey adds a private key as a host key. If a host key with the +// same public key format already exists, it is replaced. Each server +// config must have at least one host key. +func (s *ServerConfig) AddHostKey(key Signer) { + for i, k := range s.hostKeys { + if k.PublicKey().Type() == key.PublicKey().Type() { + s.hostKeys[i] = key + return + } + } + + s.hostKeys = append(s.hostKeys, key) +} + +// cachedPubKey contains the results of querying whether a public key is +// acceptable for a user. +type cachedPubKey struct { + user string + pubKeyData []byte + result error + perms *Permissions +} + +const maxCachedPubKeys = 16 + +// pubKeyCache caches tests for public keys. Since SSH clients +// will query whether a public key is acceptable before attempting to +// authenticate with it, we end up with duplicate queries for public +// key validity. The cache only applies to a single ServerConn. +type pubKeyCache struct { + keys []cachedPubKey +} + +// get returns the result for a given user/algo/key tuple. +func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { + for _, k := range c.keys { + if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { + return k, true + } + } + return cachedPubKey{}, false +} + +// add adds the given tuple to the cache. +func (c *pubKeyCache) add(candidate cachedPubKey) { + if len(c.keys) < maxCachedPubKeys { + c.keys = append(c.keys, candidate) + } +} + +// ServerConn is an authenticated SSH connection, as seen from the +// server. +type ServerConn struct { + Conn + + // If the succeeding authentication callback returned a + // non-nil Permissions pointer, it is stored here. + Permissions *Permissions +} + +// NewServerConn starts a new SSH server with c as the underlying +// transport. It starts with a handshake and, if the handshake is +// unsuccessful, it closes the connection and returns an error. The +// Request and NewChannel channels must be serviced, or the connection +// will hang. +// +// The returned error may be of type *ServerAuthError for +// authentication errors.
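+//
+// A minimal, hedged usage sketch (error handling elided; hostKey is an
+// assumed Signer loaded elsewhere, passwordAuth a hypothetical
+// callback):
+//
+//	config := &ssh.ServerConfig{PasswordCallback: passwordAuth}
+//	config.AddHostKey(hostKey)
+//	listener, _ := net.Listen("tcp", ":2022")
+//	nConn, _ := listener.Accept()
+//	conn, chans, reqs, err := ssh.NewServerConn(nConn, config)
+//	if err == nil {
+//		go ssh.DiscardRequests(reqs)
+//		for newCh := range chans {
+//			newCh.Reject(ssh.UnknownChannelType, "unsupported")
+//		}
+//		_ = conn
+//	}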
+func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { + fullConf := *config + fullConf.SetDefaults() + if fullConf.MaxAuthTries == 0 { + fullConf.MaxAuthTries = 6 + } + if len(fullConf.PublicKeyAuthAlgorithms) == 0 { + fullConf.PublicKeyAuthAlgorithms = supportedPubKeyAuthAlgos + } else { + for _, algo := range fullConf.PublicKeyAuthAlgorithms { + if !contains(supportedPubKeyAuthAlgos, algo) { + c.Close() + return nil, nil, nil, fmt.Errorf("ssh: unsupported public key authentication algorithm %s", algo) + } + } + } + // Check if the config contains any unsupported key exchanges + for _, kex := range fullConf.KeyExchanges { + if _, ok := serverForbiddenKexAlgos[kex]; ok { + c.Close() + return nil, nil, nil, fmt.Errorf("ssh: unsupported key exchange %s for server", kex) + } + } + + s := &connection{ + sshConn: sshConn{conn: c}, + } + perms, err := s.serverHandshake(&fullConf) + if err != nil { + c.Close() + return nil, nil, nil, err + } + return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil +} + +// signAndMarshal signs the data with the appropriate algorithm, +// and serializes the result in SSH wire format. algo is the negotiated +// algorithm and may be a certificate type. +func signAndMarshal(k AlgorithmSigner, rand io.Reader, data []byte, algo string) ([]byte, error) { + sig, err := k.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) + if err != nil { + return nil, err + } + + return Marshal(sig), nil +} + +// serverHandshake performs key exchange and user authentication. +func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { + if len(config.hostKeys) == 0 { + return nil, errors.New("ssh: server has no host keys") + } + + if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && + config.KeyboardInteractiveCallback == nil && (config.GSSAPIWithMICConfig == nil || + config.GSSAPIWithMICConfig.AllowLogin == nil || config.GSSAPIWithMICConfig.Server == nil) { + return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + } + + if config.ServerVersion != "" { + s.serverVersion = []byte(config.ServerVersion) + } else { + s.serverVersion = []byte(packageVersion) + } + var err error + s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) + if err != nil { + return nil, err + } + + tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) + s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) + + if err := s.transport.waitSession(); err != nil { + return nil, err + } + + // We just completed the key exchange, so the session ID is established.
+ s.sessionID = s.transport.getSessionID() + + var packet []byte + if packet, err = s.transport.readPacket(); err != nil { + return nil, err + } + + var serviceRequest serviceRequestMsg + if err = Unmarshal(packet, &serviceRequest); err != nil { + return nil, err + } + if serviceRequest.Service != serviceUserAuth { + return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") + } + serviceAccept := serviceAcceptMsg{ + Service: serviceUserAuth, + } + if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { + return nil, err + } + + perms, err := s.serverAuthenticate(config) + if err != nil { + return nil, err + } + s.mux = newMux(s.transport) + return perms, err +} + +func checkSourceAddress(addr net.Addr, sourceAddrs string) error { + if addr == nil { + return errors.New("ssh: no address known for client, but source-address match required") + } + + tcpAddr, ok := addr.(*net.TCPAddr) + if !ok { + return fmt.Errorf("ssh: remote address %v is not a TCP address when checking source-address match", addr) + } + + for _, sourceAddr := range strings.Split(sourceAddrs, ",") { + if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { + if allowedIP.Equal(tcpAddr.IP) { + return nil + } + } else { + _, ipNet, err := net.ParseCIDR(sourceAddr) + if err != nil { + return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) + } + + if ipNet.Contains(tcpAddr.IP) { + return nil + } + } + } + + return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) +} + +func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, token []byte, s *connection, + sessionID []byte, userAuthReq userAuthRequestMsg) (authErr error, perms *Permissions, err error) { + gssAPIServer := gssapiConfig.Server + defer gssAPIServer.DeleteSecContext() + var srcName string + for { + var ( + outToken []byte + needContinue bool + ) + outToken, srcName, needContinue, err = gssAPIServer.AcceptSecContext(token) + if err != nil { + return err, nil, nil + } + if len(outToken) != 0 { + if err := s.transport.writePacket(Marshal(&userAuthGSSAPIToken{ + Token: outToken, + })); err != nil { + return nil, nil, err + } + } + if !needContinue { + break + } + packet, err := s.transport.readPacket() + if err != nil { + return nil, nil, err + } + userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} + if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { + return nil, nil, err + } + token = userAuthGSSAPITokenReq.Token + } + packet, err := s.transport.readPacket() + if err != nil { + return nil, nil, err + } + userAuthGSSAPIMICReq := &userAuthGSSAPIMIC{} + if err := Unmarshal(packet, userAuthGSSAPIMICReq); err != nil { + return nil, nil, err + } + mic := buildMIC(string(sessionID), userAuthReq.User, userAuthReq.Service, userAuthReq.Method) + if err := gssAPIServer.VerifyMIC(mic, userAuthGSSAPIMICReq.MIC); err != nil { + return err, nil, nil + } + perms, authErr = gssapiConfig.AllowLogin(s, srcName) + return authErr, perms, nil +} + +// isAlgoCompatible checks if the signature format is compatible with the +// selected algorithm taking into account edge cases that occur with old +// clients. +func isAlgoCompatible(algo, sigFormat string) bool { + // Compatibility for old clients. + // + // For certificate authentication with OpenSSH 7.2-7.7 signature format can + // be rsa-sha2-256 or rsa-sha2-512 for the algorithm + // ssh-rsa-cert-v01@openssh.com.
+ // + // With gpg-agent < 2.2.6 the algorithm can be rsa-sha2-256 or rsa-sha2-512 + // for signature format ssh-rsa. + if isRSA(algo) && isRSA(sigFormat) { + return true + } + // Standard case: the underlying algorithm must match the signature format. + return underlyingAlgo(algo) == sigFormat +} + +// ServerAuthError represents server authentication errors and is +// sometimes returned by NewServerConn. It appends any authentication +// errors that may occur, and is returned if all of the authentication +// methods provided by the user failed to authenticate. +type ServerAuthError struct { + // Errors contains authentication errors returned by the authentication + // callback methods. The first entry is typically ErrNoAuth. + Errors []error +} + +func (l ServerAuthError) Error() string { + var errs []string + for _, err := range l.Errors { + errs = append(errs, err.Error()) + } + return "[" + strings.Join(errs, ", ") + "]" +} + +// ServerAuthCallbacks defines server-side authentication callbacks. +type ServerAuthCallbacks struct { + // PasswordCallback behaves like [ServerConfig.PasswordCallback]. + PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) + + // PublicKeyCallback behaves like [ServerConfig.PublicKeyCallback]. + PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // KeyboardInteractiveCallback behaves like [ServerConfig.KeyboardInteractiveCallback]. + KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) + + // GSSAPIWithMICConfig behaves like [ServerConfig.GSSAPIWithMICConfig]. + GSSAPIWithMICConfig *GSSAPIWithMICConfig +} + +// PartialSuccessError can be returned by any of the [ServerConfig] +// authentication callbacks to indicate to the client that authentication has +// partially succeeded, but further steps are required. +type PartialSuccessError struct { + // Next defines the authentication callbacks to apply to further steps. The + // available methods communicated to the client are based on the non-nil + // ServerAuthCallbacks fields. + Next ServerAuthCallbacks +} + +func (p *PartialSuccessError) Error() string { + return "ssh: authenticated with partial success" +} + +// ErrNoAuth is the error value returned if no +// authentication method has been passed yet. This happens as a normal +// part of the authentication loop, since the client first tries +// 'none' authentication to discover available methods. +// It is returned in ServerAuthError.Errors from NewServerConn. +var ErrNoAuth = errors.New("ssh: no auth passed yet") + +// BannerError is an error that can be returned by authentication handlers in +// ServerConfig to send a banner message to the client. +type BannerError struct { + Err error + Message string +} + +func (b *BannerError) Unwrap() error { + return b.Err +} + +func (b *BannerError) Error() string { + if b.Err == nil { + return b.Message + } + return b.Err.Error() +} + +func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { + sessionID := s.transport.getSessionID() + var cache pubKeyCache + var perms *Permissions + + authFailures := 0 + noneAuthCount := 0 + var authErrs []error + var displayedBanner bool + partialSuccessReturned := false + // Set the initial authentication callbacks from the config. They can be + // changed if a PartialSuccessError is returned. 
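+ // As a hedged sketch: a callback that wants a second factor can
+ // return, from e.g. PasswordCallback,
+ //
+ //	&PartialSuccessError{Next: ServerAuthCallbacks{
+ //		KeyboardInteractiveCallback: secondFactor, // secondFactor is hypothetical
+ //	}}
+ //
+ // after which only the callbacks set in Next remain available to
+ // the client.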
+ authConfig := ServerAuthCallbacks{ + PasswordCallback: config.PasswordCallback, + PublicKeyCallback: config.PublicKeyCallback, + KeyboardInteractiveCallback: config.KeyboardInteractiveCallback, + GSSAPIWithMICConfig: config.GSSAPIWithMICConfig, + } + +userAuthLoop: + for { + if authFailures >= config.MaxAuthTries && config.MaxAuthTries > 0 { + discMsg := &disconnectMsg{ + Reason: 2, + Message: "too many authentication failures", + } + + if err := s.transport.writePacket(Marshal(discMsg)); err != nil { + return nil, err + } + + return nil, discMsg + } + + var userAuthReq userAuthRequestMsg + if packet, err := s.transport.readPacket(); err != nil { + if err == io.EOF { + return nil, &ServerAuthError{Errors: authErrs} + } + return nil, err + } else if err = Unmarshal(packet, &userAuthReq); err != nil { + return nil, err + } + + if userAuthReq.Service != serviceSSH { + return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) + } + + if s.user != userAuthReq.User && partialSuccessReturned { + return nil, fmt.Errorf("ssh: client changed the user after a partial success authentication, previous user %q, current user %q", + s.user, userAuthReq.User) + } + + s.user = userAuthReq.User + + if !displayedBanner && config.BannerCallback != nil { + displayedBanner = true + msg := config.BannerCallback(s) + if msg != "" { + bannerMsg := &userAuthBannerMsg{ + Message: msg, + } + if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { + return nil, err + } + } + } + + perms = nil + authErr := ErrNoAuth + + switch userAuthReq.Method { + case "none": + noneAuthCount++ + // We don't allow none authentication after a partial success + // response. + if config.NoClientAuth && !partialSuccessReturned { + if config.NoClientAuthCallback != nil { + perms, authErr = config.NoClientAuthCallback(s) + } else { + authErr = nil + } + } + case "password": + if authConfig.PasswordCallback == nil { + authErr = errors.New("ssh: password auth not configured") + break + } + payload := userAuthReq.Payload + if len(payload) < 1 || payload[0] != 0 { + return nil, parseError(msgUserAuthRequest) + } + payload = payload[1:] + password, payload, ok := parseString(payload) + if !ok || len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + + perms, authErr = authConfig.PasswordCallback(s, password) + case "keyboard-interactive": + if authConfig.KeyboardInteractiveCallback == nil { + authErr = errors.New("ssh: keyboard-interactive auth not configured") + break + } + + prompter := &sshClientKeyboardInteractive{s} + perms, authErr = authConfig.KeyboardInteractiveCallback(s, prompter.Challenge) + case "publickey": + if authConfig.PublicKeyCallback == nil { + authErr = errors.New("ssh: publickey auth not configured") + break + } + payload := userAuthReq.Payload + if len(payload) < 1 { + return nil, parseError(msgUserAuthRequest) + } + isQuery := payload[0] == 0 + payload = payload[1:] + algoBytes, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + algo := string(algoBytes) + if !contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) { + authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) + break + } + + pubKeyData, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + + pubKey, err := ParsePublicKey(pubKeyData) + if err != nil { + return nil, err + } + + candidate, ok := cache.get(s.user, pubKeyData) + if !ok { + candidate.user = s.user + candidate.pubKeyData 
= pubKeyData + candidate.perms, candidate.result = authConfig.PublicKeyCallback(s, pubKey) + _, isPartialSuccessError := candidate.result.(*PartialSuccessError) + + if (candidate.result == nil || isPartialSuccessError) && + candidate.perms != nil && + candidate.perms.CriticalOptions != nil && + candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { + if err := checkSourceAddress( + s.RemoteAddr(), + candidate.perms.CriticalOptions[sourceAddressCriticalOption]); err != nil { + candidate.result = err + } + } + cache.add(candidate) + } + + if isQuery { + // The client can query if the given public key + // would be okay. + + if len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + _, isPartialSuccessError := candidate.result.(*PartialSuccessError) + if candidate.result == nil || isPartialSuccessError { + okMsg := userAuthPubKeyOkMsg{ + Algo: algo, + PubKey: pubKeyData, + } + if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { + return nil, err + } + continue userAuthLoop + } + authErr = candidate.result + } else { + sig, payload, ok := parseSignature(payload) + if !ok || len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + // Ensure the declared public key algo is compatible with the + // decoded one. This check will ensure we don't accept e.g. + // ssh-rsa-cert-v01@openssh.com algorithm with ssh-rsa public + // key type. The algorithm and public key type must be + // consistent: both must be certificate algorithms, or neither. + if !contains(algorithmsForKeyFormat(pubKey.Type()), algo) { + authErr = fmt.Errorf("ssh: public key type %q not compatible with selected algorithm %q", + pubKey.Type(), algo) + break + } + // Ensure the public key algo and signature algo + // are supported. Compare the private key + // algorithm name that corresponds to algo with + // sig.Format. This is usually the same, but + // for certs, the names differ. + if !contains(config.PublicKeyAuthAlgorithms, sig.Format) { + authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) + break + } + if !isAlgoCompatible(algo, sig.Format) { + authErr = fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sig.Format, algo) + break + } + + signedData := buildDataSignedForAuth(sessionID, userAuthReq, algo, pubKeyData) + + if err := pubKey.Verify(signedData, sig); err != nil { + return nil, err + } + + authErr = candidate.result + perms = candidate.perms + } + case "gssapi-with-mic": + if authConfig.GSSAPIWithMICConfig == nil { + authErr = errors.New("ssh: gssapi-with-mic auth not configured") + break + } + gssapiConfig := authConfig.GSSAPIWithMICConfig + userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload) + if err != nil { + return nil, parseError(msgUserAuthRequest) + } + // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication. + if userAuthRequestGSSAPI.N == 0 { + authErr = fmt.Errorf("ssh: Mechanism negotiation is not supported") + break + } + var i uint32 + present := false + for i = 0; i < userAuthRequestGSSAPI.N; i++ { + if userAuthRequestGSSAPI.OIDS[i].Equal(krb5Mesh) { + present = true + break + } + } + if !present { + authErr = fmt.Errorf("ssh: GSSAPI authentication must use the Kerberos V5 mechanism") + break + } + // Initial server response, see RFC 4462 section 3.3. + if err := s.transport.writePacket(Marshal(&userAuthGSSAPIResponse{ + SupportMech: krb5OID, + })); err != nil { + return nil, err + } + // Exchange token, see RFC 4462 section 3.4. 
+ packet, err := s.transport.readPacket()
+ if err != nil {
+ return nil, err
+ }
+ userAuthGSSAPITokenReq := &userAuthGSSAPIToken{}
+ if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil {
+ return nil, err
+ }
+ authErr, perms, err = gssExchangeToken(gssapiConfig, userAuthGSSAPITokenReq.Token, s, sessionID,
+ userAuthReq)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method)
+ }
+
+ authErrs = append(authErrs, authErr)
+
+ if config.AuthLogCallback != nil {
+ config.AuthLogCallback(s, userAuthReq.Method, authErr)
+ }
+
+ var bannerErr *BannerError
+ if errors.As(authErr, &bannerErr) {
+ if bannerErr.Message != "" {
+ bannerMsg := &userAuthBannerMsg{
+ Message: bannerErr.Message,
+ }
+ if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if authErr == nil {
+ break userAuthLoop
+ }
+
+ var failureMsg userAuthFailureMsg
+
+ if partialSuccess, ok := authErr.(*PartialSuccessError); ok {
+ // After a partial success error we don't allow changing the user
+ // name or executing the NoClientAuthCallback.
+ partialSuccessReturned = true
+
+ // In case a partial success is returned, the server may send
+ // a new set of authentication methods.
+ authConfig = partialSuccess.Next
+
+ // Reset pubkey cache, as the new PublicKeyCallback might
+ // accept a different set of public keys.
+ cache = pubKeyCache{}
+
+ // Send back a partial success message to the user.
+ failureMsg.PartialSuccess = true
+ } else {
+ // Allow initial attempt of 'none' without penalty.
+ if authFailures > 0 || userAuthReq.Method != "none" || noneAuthCount != 1 {
+ authFailures++
+ }
+ if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries {
+ // If we have hit the max attempts, don't bother sending the
+ // final SSH_MSG_USERAUTH_FAILURE message, since there are
+ // no more authentication methods which can be attempted,
+ // and this message may cause the client to re-attempt
+ // authentication while we send the disconnect message.
+ // Continue, and trigger the disconnect at the start of
+ // the loop.
+ //
+ // The SSH specification is somewhat confusing about this:
+ // RFC 4252 Section 5.1 requires each authentication failure
+ // be responded to with a respective SSH_MSG_USERAUTH_FAILURE
+ // message, but Section 4 says the server should disconnect
+ // after some number of attempts, and it isn't explicit which
+ // message should take precedence (i.e. should there be a failure
+ // message and then a disconnect message, or if we are going to
+ // disconnect, should we only send that message.)
+ //
+ // Either way, OpenSSH disconnects immediately after the last
+ // failed authentication attempt, and given they are typically
+ // considered the golden implementation, it seems reasonable
+ // to match that behavior.
+ continue + } + } + + if authConfig.PasswordCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "password") + } + if authConfig.PublicKeyCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "publickey") + } + if authConfig.KeyboardInteractiveCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") + } + if authConfig.GSSAPIWithMICConfig != nil && authConfig.GSSAPIWithMICConfig.Server != nil && + authConfig.GSSAPIWithMICConfig.AllowLogin != nil { + failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic") + } + + if len(failureMsg.Methods) == 0 { + return nil, errors.New("ssh: no authentication methods available") + } + + if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { + return nil, err + } + } + + if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { + return nil, err + } + return perms, nil +} + +// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by +// asking the client on the other side of a ServerConn. +type sshClientKeyboardInteractive struct { + *connection +} + +func (c *sshClientKeyboardInteractive) Challenge(name, instruction string, questions []string, echos []bool) (answers []string, err error) { + if len(questions) != len(echos) { + return nil, errors.New("ssh: echos and questions must have equal length") + } + + var prompts []byte + for i := range questions { + prompts = appendString(prompts, questions[i]) + prompts = appendBool(prompts, echos[i]) + } + + if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ + Name: name, + Instruction: instruction, + NumPrompts: uint32(len(questions)), + Prompts: prompts, + })); err != nil { + return nil, err + } + + packet, err := c.transport.readPacket() + if err != nil { + return nil, err + } + if packet[0] != msgUserAuthInfoResponse { + return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) + } + packet = packet[1:] + + n, packet, ok := parseUint32(packet) + if !ok || int(n) != len(questions) { + return nil, parseError(msgUserAuthInfoResponse) + } + + for i := uint32(0); i < n; i++ { + ans, rest, ok := parseString(packet) + if !ok { + return nil, parseError(msgUserAuthInfoResponse) + } + + answers = append(answers, string(ans)) + packet = rest + } + if len(packet) != 0 { + return nil, errors.New("ssh: junk at end of message") + } + + return answers, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go new file mode 100644 index 000000000..acef62259 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/session.go @@ -0,0 +1,647 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Session implements an interactive session described in +// "RFC 4254, section 6". + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "sync" +) + +type Signal string + +// POSIX signals as listed in RFC 4254 Section 6.10. 
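+//
+// A Signal can be delivered to a remote process with Session.Signal, for
+// example (a sketch, assuming an established *Session named sess):
+//
+//    if err := sess.Signal(SIGINT); err != nil {
+//        // handle the request error
+//    }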
+const ( + SIGABRT Signal = "ABRT" + SIGALRM Signal = "ALRM" + SIGFPE Signal = "FPE" + SIGHUP Signal = "HUP" + SIGILL Signal = "ILL" + SIGINT Signal = "INT" + SIGKILL Signal = "KILL" + SIGPIPE Signal = "PIPE" + SIGQUIT Signal = "QUIT" + SIGSEGV Signal = "SEGV" + SIGTERM Signal = "TERM" + SIGUSR1 Signal = "USR1" + SIGUSR2 Signal = "USR2" +) + +var signals = map[Signal]int{ + SIGABRT: 6, + SIGALRM: 14, + SIGFPE: 8, + SIGHUP: 1, + SIGILL: 4, + SIGINT: 2, + SIGKILL: 9, + SIGPIPE: 13, + SIGQUIT: 3, + SIGSEGV: 11, + SIGTERM: 15, +} + +type TerminalModes map[uint8]uint32 + +// POSIX terminal mode flags as listed in RFC 4254 Section 8. +const ( + tty_OP_END = 0 + VINTR = 1 + VQUIT = 2 + VERASE = 3 + VKILL = 4 + VEOF = 5 + VEOL = 6 + VEOL2 = 7 + VSTART = 8 + VSTOP = 9 + VSUSP = 10 + VDSUSP = 11 + VREPRINT = 12 + VWERASE = 13 + VLNEXT = 14 + VFLUSH = 15 + VSWTCH = 16 + VSTATUS = 17 + VDISCARD = 18 + IGNPAR = 30 + PARMRK = 31 + INPCK = 32 + ISTRIP = 33 + INLCR = 34 + IGNCR = 35 + ICRNL = 36 + IUCLC = 37 + IXON = 38 + IXANY = 39 + IXOFF = 40 + IMAXBEL = 41 + IUTF8 = 42 // RFC 8160 + ISIG = 50 + ICANON = 51 + XCASE = 52 + ECHO = 53 + ECHOE = 54 + ECHOK = 55 + ECHONL = 56 + NOFLSH = 57 + TOSTOP = 58 + IEXTEN = 59 + ECHOCTL = 60 + ECHOKE = 61 + PENDIN = 62 + OPOST = 70 + OLCUC = 71 + ONLCR = 72 + OCRNL = 73 + ONOCR = 74 + ONLRET = 75 + CS7 = 90 + CS8 = 91 + PARENB = 92 + PARODD = 93 + TTY_OP_ISPEED = 128 + TTY_OP_OSPEED = 129 +) + +// A Session represents a connection to a remote command or shell. +type Session struct { + // Stdin specifies the remote process's standard input. + // If Stdin is nil, the remote process reads from an empty + // bytes.Buffer. + Stdin io.Reader + + // Stdout and Stderr specify the remote process's standard + // output and error. + // + // If either is nil, Run connects the corresponding file + // descriptor to an instance of io.Discard. There is a + // fixed amount of buffering that is shared for the two streams. + // If either blocks it may eventually cause the remote + // command to block. + Stdout io.Writer + Stderr io.Writer + + ch Channel // the channel backing this session + started bool // true once Start, Run or Shell is invoked. + copyFuncs []func() error + errors chan error // one send per copyFunc + + // true if pipe method is active + stdinpipe, stdoutpipe, stderrpipe bool + + // stdinPipeWriter is non-nil if StdinPipe has not been called + // and Stdin was specified by the user; it is the write end of + // a pipe connecting Session.Stdin to the stdin channel. + stdinPipeWriter io.WriteCloser + + exitStatus chan error +} + +// SendRequest sends an out-of-band channel request on the SSH channel +// underlying the session. +func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { + return s.ch.SendRequest(name, wantReply, payload) +} + +func (s *Session) Close() error { + return s.ch.Close() +} + +// RFC 4254 Section 6.4. +type setenvRequest struct { + Name string + Value string +} + +// Setenv sets an environment variable that will be applied to any +// command executed by Shell or Run. +func (s *Session) Setenv(name, value string) error { + msg := setenvRequest{ + Name: name, + Value: value, + } + ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: setenv failed") + } + return err +} + +// RFC 4254 Section 6.2. 
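+// A TerminalModes map is serialized into the Modelist field of the
+// following pty-req message. A typical request (a sketch, assuming an
+// established *Session named sess):
+//
+//    modes := TerminalModes{
+//        ECHO:          0,     // disable echoing
+//        TTY_OP_ISPEED: 14400, // input speed = 14.4 kbaud
+//        TTY_OP_OSPEED: 14400, // output speed = 14.4 kbaud
+//    }
+//    if err := sess.RequestPty("xterm", 40, 80, modes); err != nil {
+//        // handle the request error
+//    }
+//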
+type ptyRequestMsg struct { + Term string + Columns uint32 + Rows uint32 + Width uint32 + Height uint32 + Modelist string +} + +// RequestPty requests the association of a pty with the session on the remote host. +func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { + var tm []byte + for k, v := range termmodes { + kv := struct { + Key byte + Val uint32 + }{k, v} + + tm = append(tm, Marshal(&kv)...) + } + tm = append(tm, tty_OP_END) + req := ptyRequestMsg{ + Term: term, + Columns: uint32(w), + Rows: uint32(h), + Width: uint32(w * 8), + Height: uint32(h * 8), + Modelist: string(tm), + } + ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) + if err == nil && !ok { + err = errors.New("ssh: pty-req failed") + } + return err +} + +// RFC 4254 Section 6.5. +type subsystemRequestMsg struct { + Subsystem string +} + +// RequestSubsystem requests the association of a subsystem with the session on the remote host. +// A subsystem is a predefined command that runs in the background when the ssh session is initiated +func (s *Session) RequestSubsystem(subsystem string) error { + msg := subsystemRequestMsg{ + Subsystem: subsystem, + } + ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: subsystem request failed") + } + return err +} + +// RFC 4254 Section 6.7. +type ptyWindowChangeMsg struct { + Columns uint32 + Rows uint32 + Width uint32 + Height uint32 +} + +// WindowChange informs the remote host about a terminal window dimension change to h rows and w columns. +func (s *Session) WindowChange(h, w int) error { + req := ptyWindowChangeMsg{ + Columns: uint32(w), + Rows: uint32(h), + Width: uint32(w * 8), + Height: uint32(h * 8), + } + _, err := s.ch.SendRequest("window-change", false, Marshal(&req)) + return err +} + +// RFC 4254 Section 6.9. +type signalMsg struct { + Signal string +} + +// Signal sends the given signal to the remote process. +// sig is one of the SIG* constants. +func (s *Session) Signal(sig Signal) error { + msg := signalMsg{ + Signal: string(sig), + } + + _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) + return err +} + +// RFC 4254 Section 6.5. +type execMsg struct { + Command string +} + +// Start runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start or Shell. +func (s *Session) Start(cmd string) error { + if s.started { + return errors.New("ssh: session already started") + } + req := execMsg{ + Command: cmd, + } + + ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) + if err == nil && !ok { + err = fmt.Errorf("ssh: command %v failed", cmd) + } + if err != nil { + return err + } + return s.start() +} + +// Run runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start, Shell, Output, +// or CombinedOutput. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. 
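+//
+// A minimal sketch (assuming an established *Client named client):
+//
+//    sess, err := client.NewSession()
+//    if err != nil {
+//        // handle the error
+//    }
+//    defer sess.Close()
+//    if err := sess.Run("/usr/bin/true"); err != nil {
+//        // the command failed to run or exited with a non-zero status
+//    }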
+func (s *Session) Run(cmd string) error { + err := s.Start(cmd) + if err != nil { + return err + } + return s.Wait() +} + +// Output runs cmd on the remote host and returns its standard output. +func (s *Session) Output(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + var b bytes.Buffer + s.Stdout = &b + err := s.Run(cmd) + return b.Bytes(), err +} + +type singleWriter struct { + b bytes.Buffer + mu sync.Mutex +} + +func (w *singleWriter) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + return w.b.Write(p) +} + +// CombinedOutput runs cmd on the remote host and returns its combined +// standard output and standard error. +func (s *Session) CombinedOutput(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + var b singleWriter + s.Stdout = &b + s.Stderr = &b + err := s.Run(cmd) + return b.b.Bytes(), err +} + +// Shell starts a login shell on the remote host. A Session only +// accepts one call to Run, Start, Shell, Output, or CombinedOutput. +func (s *Session) Shell() error { + if s.started { + return errors.New("ssh: session already started") + } + + ok, err := s.ch.SendRequest("shell", true, nil) + if err == nil && !ok { + return errors.New("ssh: could not start shell") + } + if err != nil { + return err + } + return s.start() +} + +func (s *Session) start() error { + s.started = true + + type F func(*Session) + for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { + setupFd(s) + } + + s.errors = make(chan error, len(s.copyFuncs)) + for _, fn := range s.copyFuncs { + go func(fn func() error) { + s.errors <- fn() + }(fn) + } + return nil +} + +// Wait waits for the remote command to exit. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. +func (s *Session) Wait() error { + if !s.started { + return errors.New("ssh: session not started") + } + waitErr := <-s.exitStatus + + if s.stdinPipeWriter != nil { + s.stdinPipeWriter.Close() + } + var copyError error + for range s.copyFuncs { + if err := <-s.errors; err != nil && copyError == nil { + copyError = err + } + } + if waitErr != nil { + return waitErr + } + return copyError +} + +func (s *Session) wait(reqs <-chan *Request) error { + wm := Waitmsg{status: -1} + // Wait for msg channel to be closed before returning. + for msg := range reqs { + switch msg.Type { + case "exit-status": + wm.status = int(binary.BigEndian.Uint32(msg.Payload)) + case "exit-signal": + var sigval struct { + Signal string + CoreDumped bool + Error string + Lang string + } + if err := Unmarshal(msg.Payload, &sigval); err != nil { + return err + } + + // Must sanitize strings? + wm.signal = sigval.Signal + wm.msg = sigval.Error + wm.lang = sigval.Lang + default: + // This handles keepalives and matches + // OpenSSH's behaviour. + if msg.WantReply { + msg.Reply(false, nil) + } + } + } + if wm.status == 0 { + return nil + } + if wm.status == -1 { + // exit-status was never sent from server + if wm.signal == "" { + // signal was not sent either. 
RFC 4254 + // section 6.10 recommends against this + // behavior, but it is allowed, so we let + // clients handle it. + return &ExitMissingError{} + } + wm.status = 128 + if _, ok := signals[Signal(wm.signal)]; ok { + wm.status += signals[Signal(wm.signal)] + } + } + + return &ExitError{wm} +} + +// ExitMissingError is returned if a session is torn down cleanly, but +// the server sends no confirmation of the exit status. +type ExitMissingError struct{} + +func (e *ExitMissingError) Error() string { + return "wait: remote command exited without exit status or exit signal" +} + +func (s *Session) stdin() { + if s.stdinpipe { + return + } + var stdin io.Reader + if s.Stdin == nil { + stdin = new(bytes.Buffer) + } else { + r, w := io.Pipe() + go func() { + _, err := io.Copy(w, s.Stdin) + w.CloseWithError(err) + }() + stdin, s.stdinPipeWriter = r, w + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.ch, stdin) + if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { + err = err1 + } + return err + }) +} + +func (s *Session) stdout() { + if s.stdoutpipe { + return + } + if s.Stdout == nil { + s.Stdout = io.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stdout, s.ch) + return err + }) +} + +func (s *Session) stderr() { + if s.stderrpipe { + return + } + if s.Stderr == nil { + s.Stderr = io.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stderr, s.ch.Stderr()) + return err + }) +} + +// sessionStdin reroutes Close to CloseWrite. +type sessionStdin struct { + io.Writer + ch Channel +} + +func (s *sessionStdin) Close() error { + return s.ch.CloseWrite() +} + +// StdinPipe returns a pipe that will be connected to the +// remote command's standard input when the command starts. +func (s *Session) StdinPipe() (io.WriteCloser, error) { + if s.Stdin != nil { + return nil, errors.New("ssh: Stdin already set") + } + if s.started { + return nil, errors.New("ssh: StdinPipe after process started") + } + s.stdinpipe = true + return &sessionStdin{s.ch, s.ch}, nil +} + +// StdoutPipe returns a pipe that will be connected to the +// remote command's standard output when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StdoutPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. +func (s *Session) StdoutPipe() (io.Reader, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.started { + return nil, errors.New("ssh: StdoutPipe after process started") + } + s.stdoutpipe = true + return s.ch, nil +} + +// StderrPipe returns a pipe that will be connected to the +// remote command's standard error when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StderrPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. +func (s *Session) StderrPipe() (io.Reader, error) { + if s.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + if s.started { + return nil, errors.New("ssh: StderrPipe after process started") + } + s.stderrpipe = true + return s.ch.Stderr(), nil +} + +// newSession returns a new interactive session on the remote host. 
+func newSession(ch Channel, reqs <-chan *Request) (*Session, error) {
+ s := &Session{
+ ch: ch,
+ }
+ s.exitStatus = make(chan error, 1)
+ go func() {
+ s.exitStatus <- s.wait(reqs)
+ }()
+
+ return s, nil
+}
+
+// An ExitError reports unsuccessful completion of a remote command.
+type ExitError struct {
+ Waitmsg
+}
+
+func (e *ExitError) Error() string {
+ return e.Waitmsg.String()
+}
+
+// Waitmsg stores the information about an exited remote command
+// as reported by Wait.
+type Waitmsg struct {
+ status int
+ signal string
+ msg string
+ lang string
+}
+
+// ExitStatus returns the exit status of the remote command.
+func (w Waitmsg) ExitStatus() int {
+ return w.status
+}
+
+// Signal returns the exit signal of the remote command if
+// it was terminated violently.
+func (w Waitmsg) Signal() string {
+ return w.signal
+}
+
+// Msg returns the exit message given by the remote command.
+func (w Waitmsg) Msg() string {
+ return w.msg
+}
+
+// Lang returns the language tag. See RFC 3066.
+func (w Waitmsg) Lang() string {
+ return w.lang
+}
+
+func (w Waitmsg) String() string {
+ str := fmt.Sprintf("Process exited with status %v", w.status)
+ if w.signal != "" {
+ str += fmt.Sprintf(" from signal %v", w.signal)
+ }
+ if w.msg != "" {
+ str += fmt.Sprintf(". Reason was: %v", w.msg)
+ }
+ return str
+}
diff --git a/vendor/golang.org/x/crypto/ssh/ssh_gss.go b/vendor/golang.org/x/crypto/ssh/ssh_gss.go
new file mode 100644
index 000000000..24bd7c8e8
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/ssh_gss.go
@@ -0,0 +1,139 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "encoding/asn1"
+ "errors"
+)
+
+var krb5OID []byte
+
+func init() {
+ krb5OID, _ = asn1.Marshal(krb5Mesh)
+}
+
+// GSSAPIClient provides the API to plug in GSSAPI authentication for client logins.
+type GSSAPIClient interface {
+ // InitSecContext initiates the establishment of a security context for GSS-API between the
+ // ssh client and ssh server. Initially the token parameter should be specified as nil.
+ // The routine may return an outputToken which should be transferred to
+ // the ssh server, where the ssh server will present it to
+ // AcceptSecContext. If no token need be sent, InitSecContext will indicate this by setting
+ // needContinue to false. To complete the context
+ // establishment, one or more reply tokens may be required from the ssh
+ // server; if so, InitSecContext will return a needContinue which is true.
+ // In this case, InitSecContext should be called again when the
+ // reply token is received from the ssh server, passing the reply
+ // token to InitSecContext via the token parameter.
+ // See RFC 2743 section 2.2.1 and RFC 4462 section 3.4.
+ InitSecContext(target string, token []byte, isGSSDelegCreds bool) (outputToken []byte, needContinue bool, err error)
+ // GetMIC generates a cryptographic MIC for the SSH2 message, and places
+ // the MIC in a token for transfer to the ssh server.
+ // The contents of the MIC field are obtained by calling GSS_GetMIC()
+ // over the following, using the GSS-API context that was just
+ // established:
+ // string session identifier
+ // byte SSH_MSG_USERAUTH_REQUEST
+ // string user name
+ // string service
+ // string "gssapi-with-mic"
+ // See RFC 2743 section 2.3.1 and RFC 4462 3.5.
+ GetMIC(micField []byte) ([]byte, error)
+ // Whenever possible, it should be possible for
+ // DeleteSecContext() calls to be successfully processed even
+ // if other calls cannot succeed, thereby enabling context-related
+ // resources to be released.
+ // In addition to deleting established security contexts,
+ // gss_delete_sec_context must also be able to delete "half-built"
+ // security contexts resulting from an incomplete sequence of
+ // InitSecContext()/AcceptSecContext() calls.
+ // See RFC 2743 section 2.2.3.
+ DeleteSecContext() error
+}
+
+// GSSAPIServer provides the API to plug in GSSAPI authentication for server logins.
+type GSSAPIServer interface {
+ // AcceptSecContext allows a remotely initiated security context between the application
+ // and a remote peer to be established by the ssh client. The routine may return an
+ // outputToken which should be transferred to the ssh client,
+ // where the ssh client will present it to InitSecContext.
+ // If no token need be sent, AcceptSecContext will indicate this
+ // by setting the needContinue to false. To
+ // complete the context establishment, one or more reply tokens may be
+ // required from the ssh client; if so, AcceptSecContext
+ // will return a needContinue which is true, in which case it
+ // should be called again when the reply token is received from the ssh
+ // client, passing the token to AcceptSecContext via the
+ // token parameter.
+ // The srcName return value is the authenticated username.
+ // See RFC 2743 section 2.2.2 and RFC 4462 section 3.4.
+ AcceptSecContext(token []byte) (outputToken []byte, srcName string, needContinue bool, err error)
+ // VerifyMIC verifies that the cryptographic MIC, contained in the micToken parameter,
+ // matches the supplied message as received from the ssh client.
+ // See RFC 2743 section 2.3.2.
+ VerifyMIC(micField []byte, micToken []byte) error
+ // Whenever possible, it should be possible for
+ // DeleteSecContext() calls to be successfully processed even
+ // if other calls cannot succeed, thereby enabling context-related
+ // resources to be released.
+ // In addition to deleting established security contexts,
+ // gss_delete_sec_context must also be able to delete "half-built"
+ // security contexts resulting from an incomplete sequence of
+ // InitSecContext()/AcceptSecContext() calls.
+ // See RFC 2743 section 2.2.3.
+ DeleteSecContext() error
+}
+
+var (
+ // OpenSSH supports the Kerberos V5 mechanism only for GSS-API authentication,
+ // so we also support the krb5 mechanism only.
+ // See RFC 1964 section 1.
+ krb5Mesh = asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2}
+)
+
+// The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST.
+// See RFC 4462 section 3.2.
+type userAuthRequestGSSAPI struct {
+ N uint32
+ OIDS []asn1.ObjectIdentifier
+}
+
+func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) {
+ n, rest, ok := parseUint32(payload)
+ if !ok {
+ return nil, errors.New("parse uint32 failed")
+ }
+ s := &userAuthRequestGSSAPI{
+ N: n,
+ OIDS: make([]asn1.ObjectIdentifier, n),
+ }
+ for i := 0; i < int(n); i++ {
+ var (
+ desiredMech []byte
+ err error
+ )
+ desiredMech, rest, ok = parseString(rest)
+ if !ok {
+ return nil, errors.New("parse string failed")
+ }
+ if rest, err = asn1.Unmarshal(desiredMech, &s.OIDS[i]); err != nil {
+ return nil, err
+ }
+ }
+ return s, nil
+}
+
+// See RFC 4462 section 3.6.
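+//
+// The MIC is computed over the concatenation, in SSH wire encoding, of:
+//
+//    string  session identifier
+//    byte    SSH_MSG_USERAUTH_REQUEST
+//    string  user name
+//    string  service
+//    string  "gssapi-with-mic"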
+func buildMIC(sessionID string, username string, service string, authMethod string) []byte { + out := make([]byte, 0, 0) + out = appendString(out, sessionID) + out = append(out, msgUserAuthRequest) + out = appendString(out, username) + out = appendString(out, service) + out = appendString(out, authMethod) + return out +} diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go new file mode 100644 index 000000000..b171b330b --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/streamlocal.go @@ -0,0 +1,116 @@ +package ssh + +import ( + "errors" + "io" + "net" +) + +// streamLocalChannelOpenDirectMsg is a struct used for SSH_MSG_CHANNEL_OPEN message +// with "direct-streamlocal@openssh.com" string. +// +// See openssh-portable/PROTOCOL, section 2.4. connection: Unix domain socket forwarding +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL#L235 +type streamLocalChannelOpenDirectMsg struct { + socketPath string + reserved0 string + reserved1 uint32 +} + +// forwardedStreamLocalPayload is a struct used for SSH_MSG_CHANNEL_OPEN message +// with "forwarded-streamlocal@openssh.com" string. +type forwardedStreamLocalPayload struct { + SocketPath string + Reserved0 string +} + +// streamLocalChannelForwardMsg is a struct used for SSH2_MSG_GLOBAL_REQUEST message +// with "streamlocal-forward@openssh.com"/"cancel-streamlocal-forward@openssh.com" string. +type streamLocalChannelForwardMsg struct { + socketPath string +} + +// ListenUnix is similar to ListenTCP but uses a Unix domain socket. +func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { + c.handleForwardsOnce.Do(c.handleForwards) + m := streamLocalChannelForwardMsg{ + socketPath, + } + // send message + ok, _, err := c.SendRequest("streamlocal-forward@openssh.com", true, Marshal(&m)) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") + } + ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) + + return &unixListener{socketPath, c, ch}, nil +} + +func (c *Client) dialStreamLocal(socketPath string) (Channel, error) { + msg := streamLocalChannelOpenDirectMsg{ + socketPath: socketPath, + } + ch, in, err := c.OpenChannel("direct-streamlocal@openssh.com", Marshal(&msg)) + if err != nil { + return nil, err + } + go DiscardRequests(in) + return ch, err +} + +type unixListener struct { + socketPath string + + conn *Client + in <-chan forward +} + +// Accept waits for and returns the next connection to the listener. +func (l *unixListener) Accept() (net.Conn, error) { + s, ok := <-l.in + if !ok { + return nil, io.EOF + } + ch, incoming, err := s.newCh.Accept() + if err != nil { + return nil, err + } + go DiscardRequests(incoming) + + return &chanConn{ + Channel: ch, + laddr: &net.UnixAddr{ + Name: l.socketPath, + Net: "unix", + }, + raddr: &net.UnixAddr{ + Name: "@", + Net: "unix", + }, + }, nil +} + +// Close closes the listener. +func (l *unixListener) Close() error { + // this also closes the listener. + l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) + m := streamLocalChannelForwardMsg{ + l.socketPath, + } + ok, _, err := l.conn.SendRequest("cancel-streamlocal-forward@openssh.com", true, Marshal(&m)) + if err == nil && !ok { + err = errors.New("ssh: cancel-streamlocal-forward@openssh.com failed") + } + return err +} + +// Addr returns the listener's network address. 
+func (l *unixListener) Addr() net.Addr { + return &net.UnixAddr{ + Name: l.socketPath, + Net: "unix", + } +} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go new file mode 100644 index 000000000..ef5059a11 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/tcpip.go @@ -0,0 +1,509 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "context" + "errors" + "fmt" + "io" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" +) + +// Listen requests the remote peer open a listening socket on +// addr. Incoming connections will be available by calling Accept on +// the returned net.Listener. The listener must be serviced, or the +// SSH connection may hang. +// N must be "tcp", "tcp4", "tcp6", or "unix". +func (c *Client) Listen(n, addr string) (net.Listener, error) { + switch n { + case "tcp", "tcp4", "tcp6": + laddr, err := net.ResolveTCPAddr(n, addr) + if err != nil { + return nil, err + } + return c.ListenTCP(laddr) + case "unix": + return c.ListenUnix(addr) + default: + return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) + } +} + +// Automatic port allocation is broken with OpenSSH before 6.0. See +// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In +// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, +// rather than the actual port number. This means you can never open +// two different listeners with auto allocated ports. We work around +// this by trying explicit ports until we succeed. + +const openSSHPrefix = "OpenSSH_" + +var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) + +// isBrokenOpenSSHVersion returns true if the given version string +// specifies a version of OpenSSH that is known to have a bug in port +// forwarding. +func isBrokenOpenSSHVersion(versionStr string) bool { + i := strings.Index(versionStr, openSSHPrefix) + if i < 0 { + return false + } + i += len(openSSHPrefix) + j := i + for ; j < len(versionStr); j++ { + if versionStr[j] < '0' || versionStr[j] > '9' { + break + } + } + version, _ := strconv.Atoi(versionStr[i:j]) + return version < 6 +} + +// autoPortListenWorkaround simulates automatic port allocation by +// trying random ports repeatedly. +func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { + var sshListener net.Listener + var err error + const tries = 10 + for i := 0; i < tries; i++ { + addr := *laddr + addr.Port = 1024 + portRandomizer.Intn(60000) + sshListener, err = c.ListenTCP(&addr) + if err == nil { + laddr.Port = addr.Port + return sshListener, err + } + } + return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) +} + +// RFC 4254 7.1 +type channelForwardMsg struct { + addr string + rport uint32 +} + +// handleForwards starts goroutines handling forwarded connections. +// It's called on first use by (*Client).ListenTCP to not launch +// goroutines until needed. +func (c *Client) handleForwards() { + go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-tcpip")) + go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-streamlocal@openssh.com")) +} + +// ListenTCP requests the remote peer open a listening socket +// on laddr. Incoming connections will be available by calling +// Accept on the returned net.Listener. 
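+//
+// A minimal remote-forwarding sketch (assuming an established *Client
+// named client and a hypothetical connection handler handle):
+//
+//    l, err := client.ListenTCP(&net.TCPAddr{IP: net.IPv4zero, Port: 8080})
+//    if err != nil {
+//        // handle the error
+//    }
+//    defer l.Close()
+//    for {
+//        c, err := l.Accept()
+//        if err != nil {
+//            break
+//        }
+//        go handle(c)
+//    }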
+func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { + c.handleForwardsOnce.Do(c.handleForwards) + if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { + return c.autoPortListenWorkaround(laddr) + } + + m := channelForwardMsg{ + laddr.IP.String(), + uint32(laddr.Port), + } + // send message + ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("ssh: tcpip-forward request denied by peer") + } + + // If the original port was 0, then the remote side will + // supply a real port number in the response. + if laddr.Port == 0 { + var p struct { + Port uint32 + } + if err := Unmarshal(resp, &p); err != nil { + return nil, err + } + laddr.Port = int(p.Port) + } + + // Register this forward, using the port number we obtained. + ch := c.forwards.add(laddr) + + return &tcpListener{laddr, c, ch}, nil +} + +// forwardList stores a mapping between remote +// forward requests and the tcpListeners. +type forwardList struct { + sync.Mutex + entries []forwardEntry +} + +// forwardEntry represents an established mapping of a laddr on a +// remote ssh server to a channel connected to a tcpListener. +type forwardEntry struct { + laddr net.Addr + c chan forward +} + +// forward represents an incoming forwarded tcpip connection. The +// arguments to add/remove/lookup should be address as specified in +// the original forward-request. +type forward struct { + newCh NewChannel // the ssh client channel underlying this forward + raddr net.Addr // the raddr of the incoming connection +} + +func (l *forwardList) add(addr net.Addr) chan forward { + l.Lock() + defer l.Unlock() + f := forwardEntry{ + laddr: addr, + c: make(chan forward, 1), + } + l.entries = append(l.entries, f) + return f.c +} + +// See RFC 4254, section 7.2 +type forwardedTCPPayload struct { + Addr string + Port uint32 + OriginAddr string + OriginPort uint32 +} + +// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. +func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { + if port == 0 || port > 65535 { + return nil, fmt.Errorf("ssh: port number out of range: %d", port) + } + ip := net.ParseIP(string(addr)) + if ip == nil { + return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) + } + return &net.TCPAddr{IP: ip, Port: int(port)}, nil +} + +func (l *forwardList) handleChannels(in <-chan NewChannel) { + for ch := range in { + var ( + laddr net.Addr + raddr net.Addr + err error + ) + switch channelType := ch.ChannelType(); channelType { + case "forwarded-tcpip": + var payload forwardedTCPPayload + if err = Unmarshal(ch.ExtraData(), &payload); err != nil { + ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) + continue + } + + // RFC 4254 section 7.2 specifies that incoming + // addresses should list the address, in string + // format. It is implied that this should be an IP + // address, as it would be impossible to connect to it + // otherwise. 
+ laddr, err = parseTCPAddr(payload.Addr, payload.Port) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + + case "forwarded-streamlocal@openssh.com": + var payload forwardedStreamLocalPayload + if err = Unmarshal(ch.ExtraData(), &payload); err != nil { + ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) + continue + } + laddr = &net.UnixAddr{ + Name: payload.SocketPath, + Net: "unix", + } + raddr = &net.UnixAddr{ + Name: "@", + Net: "unix", + } + default: + panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) + } + if ok := l.forward(laddr, raddr, ch); !ok { + // Section 7.2, implementations MUST reject spurious incoming + // connections. + ch.Reject(Prohibited, "no forward for address") + continue + } + + } +} + +// remove removes the forward entry, and the channel feeding its +// listener. +func (l *forwardList) remove(addr net.Addr) { + l.Lock() + defer l.Unlock() + for i, f := range l.entries { + if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { + l.entries = append(l.entries[:i], l.entries[i+1:]...) + close(f.c) + return + } + } +} + +// closeAll closes and clears all forwards. +func (l *forwardList) closeAll() { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + close(f.c) + } + l.entries = nil +} + +func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { + f.c <- forward{newCh: ch, raddr: raddr} + return true + } + } + return false +} + +type tcpListener struct { + laddr *net.TCPAddr + + conn *Client + in <-chan forward +} + +// Accept waits for and returns the next connection to the listener. +func (l *tcpListener) Accept() (net.Conn, error) { + s, ok := <-l.in + if !ok { + return nil, io.EOF + } + ch, incoming, err := s.newCh.Accept() + if err != nil { + return nil, err + } + go DiscardRequests(incoming) + + return &chanConn{ + Channel: ch, + laddr: l.laddr, + raddr: s.raddr, + }, nil +} + +// Close closes the listener. +func (l *tcpListener) Close() error { + m := channelForwardMsg{ + l.laddr.IP.String(), + uint32(l.laddr.Port), + } + + // this also closes the listener. + l.conn.forwards.remove(l.laddr) + ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) + if err == nil && !ok { + err = errors.New("ssh: cancel-tcpip-forward failed") + } + return err +} + +// Addr returns the listener's network address. +func (l *tcpListener) Addr() net.Addr { + return l.laddr +} + +// DialContext initiates a connection to the addr from the remote host. +// +// The provided Context must be non-nil. If the context expires before the +// connection is complete, an error is returned. Once successfully connected, +// any expiration of the context will not affect the connection. +// +// See func Dial for additional information. 
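+//
+// A short sketch (assuming an established *Client named client):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//    defer cancel()
+//    conn, err := client.DialContext(ctx, "tcp", "example.com:80")
+//    if err != nil {
+//        // handle the error
+//    }
+//    defer conn.Close()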
+func (c *Client) DialContext(ctx context.Context, n, addr string) (net.Conn, error) { + if err := ctx.Err(); err != nil { + return nil, err + } + type connErr struct { + conn net.Conn + err error + } + ch := make(chan connErr) + go func() { + conn, err := c.Dial(n, addr) + select { + case ch <- connErr{conn, err}: + case <-ctx.Done(): + if conn != nil { + conn.Close() + } + } + }() + select { + case res := <-ch: + return res.conn, res.err + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +// Dial initiates a connection to the addr from the remote host. +// The resulting connection has a zero LocalAddr() and RemoteAddr(). +func (c *Client) Dial(n, addr string) (net.Conn, error) { + var ch Channel + switch n { + case "tcp", "tcp4", "tcp6": + // Parse the address into host and numeric port. + host, portString, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.ParseUint(portString, 10, 16) + if err != nil { + return nil, err + } + ch, err = c.dial(net.IPv4zero.String(), 0, host, int(port)) + if err != nil { + return nil, err + } + // Use a zero address for local and remote address. + zeroAddr := &net.TCPAddr{ + IP: net.IPv4zero, + Port: 0, + } + return &chanConn{ + Channel: ch, + laddr: zeroAddr, + raddr: zeroAddr, + }, nil + case "unix": + var err error + ch, err = c.dialStreamLocal(addr) + if err != nil { + return nil, err + } + return &chanConn{ + Channel: ch, + laddr: &net.UnixAddr{ + Name: "@", + Net: "unix", + }, + raddr: &net.UnixAddr{ + Name: addr, + Net: "unix", + }, + }, nil + default: + return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) + } +} + +// DialTCP connects to the remote address raddr on the network net, +// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used +// as the local address for the connection. +func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { + if laddr == nil { + laddr = &net.TCPAddr{ + IP: net.IPv4zero, + Port: 0, + } + } + ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) + if err != nil { + return nil, err + } + return &chanConn{ + Channel: ch, + laddr: laddr, + raddr: raddr, + }, nil +} + +// RFC 4254 7.2 +type channelOpenDirectMsg struct { + raddr string + rport uint32 + laddr string + lport uint32 +} + +func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { + msg := channelOpenDirectMsg{ + raddr: raddr, + rport: uint32(rport), + laddr: laddr, + lport: uint32(lport), + } + ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) + if err != nil { + return nil, err + } + go DiscardRequests(in) + return ch, err +} + +type tcpChan struct { + Channel // the backing channel +} + +// chanConn fulfills the net.Conn interface without +// the tcpChan having to hold laddr or raddr directly. +type chanConn struct { + Channel + laddr, raddr net.Addr +} + +// LocalAddr returns the local network address. +func (t *chanConn) LocalAddr() net.Addr { + return t.laddr +} + +// RemoteAddr returns the remote network address. +func (t *chanConn) RemoteAddr() net.Addr { + return t.raddr +} + +// SetDeadline sets the read and write deadlines associated +// with the connection. +func (t *chanConn) SetDeadline(deadline time.Time) error { + if err := t.SetReadDeadline(deadline); err != nil { + return err + } + return t.SetWriteDeadline(deadline) +} + +// SetReadDeadline sets the read deadline. +// A zero value for t means Read will not time out. 
+// After the deadline, the error from Read will implement net.Error +// with Timeout() == true. +func (t *chanConn) SetReadDeadline(deadline time.Time) error { + // for compatibility with previous version, + // the error message contains "tcpChan" + return errors.New("ssh: tcpChan: deadline not supported") +} + +// SetWriteDeadline exists to satisfy the net.Conn interface +// but is not implemented by this type. It always returns an error. +func (t *chanConn) SetWriteDeadline(deadline time.Time) error { + return errors.New("ssh: tcpChan: deadline not supported") +} diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go new file mode 100644 index 000000000..0424d2d37 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/transport.go @@ -0,0 +1,380 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bufio" + "bytes" + "errors" + "io" + "log" +) + +// debugTransport if set, will print packet types as they go over the +// wire. No message decoding is done, to minimize the impact on timing. +const debugTransport = false + +const ( + gcm128CipherID = "aes128-gcm@openssh.com" + gcm256CipherID = "aes256-gcm@openssh.com" + aes128cbcID = "aes128-cbc" + tripledescbcID = "3des-cbc" +) + +// packetConn represents a transport that implements packet based +// operations. +type packetConn interface { + // Encrypt and send a packet of data to the remote peer. + writePacket(packet []byte) error + + // Read a packet from the connection. The read is blocking, + // i.e. if error is nil, then the returned byte slice is + // always non-empty. + readPacket() ([]byte, error) + + // Close closes the write-side of the connection. + Close() error +} + +// transport is the keyingTransport that implements the SSH packet +// protocol. +type transport struct { + reader connectionState + writer connectionState + + bufReader *bufio.Reader + bufWriter *bufio.Writer + rand io.Reader + isClient bool + io.Closer + + strictMode bool + initialKEXDone bool +} + +// packetCipher represents a combination of SSH encryption/MAC +// protocol. A single instance should be used for one direction only. +type packetCipher interface { + // writeCipherPacket encrypts the packet and writes it to w. The + // contents of the packet are generally scrambled. + writeCipherPacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error + + // readCipherPacket reads and decrypts a packet of data. The + // returned packet may be overwritten by future calls of + // readPacket. + readCipherPacket(seqnum uint32, r io.Reader) ([]byte, error) +} + +// connectionState represents one side (read or write) of the +// connection. This is necessary because each direction has its own +// keys, and can even have its own algorithms +type connectionState struct { + packetCipher + seqNum uint32 + dir direction + pendingKeyChange chan packetCipher +} + +func (t *transport) setStrictMode() error { + if t.reader.seqNum != 1 { + return errors.New("ssh: sequence number != 1 when strict KEX mode requested") + } + t.strictMode = true + return nil +} + +func (t *transport) setInitialKEXDone() { + t.initialKEXDone = true +} + +// prepareKeyChange sets up key material for a keychange. The key changes in +// both directions are triggered by reading and writing a msgNewKey packet +// respectively. 
+func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { + ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult) + if err != nil { + return err + } + t.reader.pendingKeyChange <- ciph + + ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult) + if err != nil { + return err + } + t.writer.pendingKeyChange <- ciph + + return nil +} + +func (t *transport) printPacket(p []byte, write bool) { + if len(p) == 0 { + return + } + who := "server" + if t.isClient { + who = "client" + } + what := "read" + if write { + what = "write" + } + + log.Println(what, who, p[0]) +} + +// Read and decrypt next packet. +func (t *transport) readPacket() (p []byte, err error) { + for { + p, err = t.reader.readPacket(t.bufReader, t.strictMode) + if err != nil { + break + } + // in strict mode we pass through DEBUG and IGNORE packets only during the initial KEX + if len(p) == 0 || (t.strictMode && !t.initialKEXDone) || (p[0] != msgIgnore && p[0] != msgDebug) { + break + } + } + if debugTransport { + t.printPacket(p, false) + } + + return p, err +} + +func (s *connectionState) readPacket(r *bufio.Reader, strictMode bool) ([]byte, error) { + packet, err := s.packetCipher.readCipherPacket(s.seqNum, r) + s.seqNum++ + if err == nil && len(packet) == 0 { + err = errors.New("ssh: zero length packet") + } + + if len(packet) > 0 { + switch packet[0] { + case msgNewKeys: + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher + if strictMode { + s.seqNum = 0 + } + default: + return nil, errors.New("ssh: got bogus newkeys message") + } + + case msgDisconnect: + // Transform a disconnect message into an + // error. Since this is lowest level at which + // we interpret message types, doing it here + // ensures that we don't have to handle it + // elsewhere. + var msg disconnectMsg + if err := Unmarshal(packet, &msg); err != nil { + return nil, err + } + return nil, &msg + } + } + + // The packet may point to an internal buffer, so copy the + // packet out here. 
+ fresh := make([]byte, len(packet)) + copy(fresh, packet) + + return fresh, err +} + +func (t *transport) writePacket(packet []byte) error { + if debugTransport { + t.printPacket(packet, true) + } + return t.writer.writePacket(t.bufWriter, t.rand, packet, t.strictMode) +} + +func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte, strictMode bool) error { + changeKeys := len(packet) > 0 && packet[0] == msgNewKeys + + err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet) + if err != nil { + return err + } + if err = w.Flush(); err != nil { + return err + } + s.seqNum++ + if changeKeys { + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher + if strictMode { + s.seqNum = 0 + } + default: + panic("ssh: no key material for msgNewKeys") + } + } + return err +} + +func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport { + t := &transport{ + bufReader: bufio.NewReader(rwc), + bufWriter: bufio.NewWriter(rwc), + rand: rand, + reader: connectionState{ + packetCipher: &streamPacketCipher{cipher: noneCipher{}}, + pendingKeyChange: make(chan packetCipher, 1), + }, + writer: connectionState{ + packetCipher: &streamPacketCipher{cipher: noneCipher{}}, + pendingKeyChange: make(chan packetCipher, 1), + }, + Closer: rwc, + } + t.isClient = isClient + + if isClient { + t.reader.dir = serverKeys + t.writer.dir = clientKeys + } else { + t.reader.dir = clientKeys + t.writer.dir = serverKeys + } + + return t +} + +type direction struct { + ivTag []byte + keyTag []byte + macKeyTag []byte +} + +var ( + serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}} + clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}} +) + +// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as +// described in RFC 4253, section 6.4. direction should either be serverKeys +// (to setup server->client keys) or clientKeys (for client->server keys). +func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { + cipherMode := cipherModes[algs.Cipher] + + iv := make([]byte, cipherMode.ivSize) + key := make([]byte, cipherMode.keySize) + + generateKeyMaterial(iv, d.ivTag, kex) + generateKeyMaterial(key, d.keyTag, kex) + + var macKey []byte + if !aeadCiphers[algs.Cipher] { + macMode := macModes[algs.MAC] + macKey = make([]byte, macMode.keySize) + generateKeyMaterial(macKey, d.macKeyTag, kex) + } + + return cipherModes[algs.Cipher].create(key, iv, macKey, algs) +} + +// generateKeyMaterial fills out with key material generated from tag, K, H +// and sessionId, as specified in RFC 4253, section 7.2. +func generateKeyMaterial(out, tag []byte, r *kexResult) { + var digestsSoFar []byte + + h := r.Hash.New() + for len(out) > 0 { + h.Reset() + h.Write(r.K) + h.Write(r.H) + + if len(digestsSoFar) == 0 { + h.Write(tag) + h.Write(r.SessionID) + } else { + h.Write(digestsSoFar) + } + + digest := h.Sum(nil) + n := copy(out, digest) + out = out[n:] + if len(out) > 0 { + digestsSoFar = append(digestsSoFar, digest...) + } + } +} + +const packageVersion = "SSH-2.0-Go" + +// Sends and receives a version line. The versionLine string should +// be US ASCII, start with "SSH-2.0-", and should not include a +// newline. exchangeVersions returns the other side's version line. 
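+//
+// A short sketch (conn is an established io.ReadWriter; the identification
+// string shown is this package's own packageVersion):
+//
+//    them, err := exchangeVersions(conn, []byte("SSH-2.0-Go"))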
+func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { + // Contrary to the RFC, we do not ignore lines that don't + // start with "SSH-2.0-" to make the library usable with + // nonconforming servers. + for _, c := range versionLine { + // The spec disallows non US-ASCII chars, and + // specifically forbids null chars. + if c < 32 { + return nil, errors.New("ssh: junk character in version line") + } + } + if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { + return + } + + them, err = readVersion(rw) + return them, err +} + +// maxVersionStringBytes is the maximum number of bytes that we'll +// accept as a version string. RFC 4253 section 4.2 limits this at 255 +// chars +const maxVersionStringBytes = 255 + +// Read version string as specified by RFC 4253, section 4.2. +func readVersion(r io.Reader) ([]byte, error) { + versionString := make([]byte, 0, 64) + var ok bool + var buf [1]byte + + for length := 0; length < maxVersionStringBytes; length++ { + _, err := io.ReadFull(r, buf[:]) + if err != nil { + return nil, err + } + // The RFC says that the version should be terminated with \r\n + // but several SSH servers actually only send a \n. + if buf[0] == '\n' { + if !bytes.HasPrefix(versionString, []byte("SSH-")) { + // RFC 4253 says we need to ignore all version string lines + // except the one containing the SSH version (provided that + // all the lines do not exceed 255 bytes in total). + versionString = versionString[:0] + continue + } + ok = true + break + } + + // non ASCII chars are disallowed, but we are lenient, + // since Go doesn't use null-terminated strings. + + // The RFC allows a comment after a space, however, + // all of it (version and comments) goes into the + // session hash. + versionString = append(versionString, buf[0]) + } + + if !ok { + return nil, errors.New("ssh: overflow reading version string") + } + + // There might be a '\r' on the end which we should remove. + if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { + versionString = versionString[:len(versionString)-1] + } + return versionString, nil +} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 000000000..cf66309c4 --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. 
The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context // import "golang.org/x/net/context" + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 000000000..0c1b86793 --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, f +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. 
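+//
+// Illustrative only (the timeout value here is an assumption):
+//
+//	ctx, cancel := WithDeadline(Background(), time.Now().Add(5*time.Second))
+//	defer cancel()
+//	<-ctx.Done() // closed after at most 5s, or sooner if cancel is called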
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, f +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 000000000..e31e35a90 --- /dev/null +++ b/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 000000000..065ff3dfa --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. 
+var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. 
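+		// The lock order is always parent before child, and a child's
+		// cancel(false, ...) never locks its parent, so this cannot
+		// deadlock.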
+ child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. 
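+//
+// Lookup walks up the chain of parents (sketch; the key type here is
+// an assumption for illustration):
+//
+//	type favKey struct{}
+//	ctx := WithValue(Background(), favKey{}, "gopher")
+//	_ = ctx.Value(favKey{}) // "gopher", found on this valueCtx
+//	_ = ctx.Value("other")  // nil, delegated up to Background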
+type valueCtx struct {
+	Context
+	key, val interface{}
+}
+
+func (c *valueCtx) String() string {
+	return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
+}
+
+func (c *valueCtx) Value(key interface{}) interface{} {
+	if c.key == key {
+		return c.val
+	}
+	return c.Context.Value(key)
+}
diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go
new file mode 100644
index 000000000..ec5a63803
--- /dev/null
+++ b/vendor/golang.org/x/net/context/pre_go19.go
@@ -0,0 +1,109 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.9
+
+package context
+
+import "time"
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled. Deadline returns ok==false when no deadline is
+	// set. Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled. Done may return nil if this context can
+	// never be canceled. Successive calls to Done return the same value.
+	//
+	// WithCancel arranges for Done to be closed when cancel is called;
+	// WithDeadline arranges for Done to be closed when the deadline
+	// expires; WithTimeout arranges for Done to be closed when the timeout
+	// elapses.
+	//
+	// Done is provided for use in select statements:
+	//
+	//  // Stream generates values with DoSomething and sends them to out
+	//  // until DoSomething returns an error or ctx.Done is closed.
+	//  func Stream(ctx context.Context, out chan<- Value) error {
+	//  	for {
+	//  		v, err := DoSomething(ctx)
+	//  		if err != nil {
+	//  			return err
+	//  		}
+	//  		select {
+	//  		case <-ctx.Done():
+	//  			return ctx.Err()
+	//  		case out <- v:
+	//  		}
+	//  	}
+	//  }
+	//
+	// See http://blog.golang.org/pipelines for more examples of how to use
+	// a Done channel for cancelation.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed. Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed. No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key return the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	//
+	// A key identifies a specific value in a Context. Functions that wish
+	// to store values in Context typically allocate a key in a global
+	// variable then use that key as the argument to context.WithValue and
+	// Context.Value. A key can be any type that supports equality;
+	// packages should define keys as an unexported type to avoid
+	// collisions.
+	//
+	// Packages that define a Context key should provide type-safe accessors
+	// for the values stored using that key:
+	//
+	//  // Package user defines a User type that's stored in Contexts.
+ // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/vendor/golang.org/x/net/http/httpproxy/proxy.go b/vendor/golang.org/x/net/http/httpproxy/proxy.go new file mode 100644 index 000000000..6404aaf15 --- /dev/null +++ b/vendor/golang.org/x/net/http/httpproxy/proxy.go @@ -0,0 +1,367 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httpproxy provides support for HTTP proxy determination +// based on environment variables, as provided by net/http's +// ProxyFromEnvironment function. +// +// The API is not subject to the Go 1 compatibility promise and may change at +// any time. +package httpproxy + +import ( + "errors" + "fmt" + "net" + "net/url" + "os" + "strings" + "unicode/utf8" + + "golang.org/x/net/idna" +) + +// Config holds configuration for HTTP proxy settings. See +// FromEnvironment for details. +type Config struct { + // HTTPProxy represents the value of the HTTP_PROXY or + // http_proxy environment variable. It will be used as the proxy + // URL for HTTP requests unless overridden by NoProxy. + HTTPProxy string + + // HTTPSProxy represents the HTTPS_PROXY or https_proxy + // environment variable. It will be used as the proxy URL for + // HTTPS requests unless overridden by NoProxy. + HTTPSProxy string + + // NoProxy represents the NO_PROXY or no_proxy environment + // variable. It specifies a string that contains comma-separated values + // specifying hosts that should be excluded from proxying. Each value is + // represented by an IP address prefix (1.2.3.4), an IP address prefix in + // CIDR notation (1.2.3.4/8), a domain name, or a special DNS label (*). + // An IP address prefix and domain name can also include a literal port + // number (1.2.3.4:80). + // A domain name matches that name and all subdomains. A domain name with + // a leading "." matches subdomains only. For example "foo.com" matches + // "foo.com" and "bar.foo.com"; ".y.com" matches "x.y.com" but not "y.com". + // A single asterisk (*) indicates that no proxying should be done. + // A best effort is made to parse the string and errors are + // ignored. + NoProxy string + + // CGI holds whether the current process is running + // as a CGI handler (FromEnvironment infers this from the + // presence of a REQUEST_METHOD environment variable). 
+ // When this is set, ProxyForURL will return an error + // when HTTPProxy applies, because a client could be + // setting HTTP_PROXY maliciously. See https://golang.org/s/cgihttpproxy. + CGI bool +} + +// config holds the parsed configuration for HTTP proxy settings. +type config struct { + // Config represents the original configuration as defined above. + Config + + // httpsProxy is the parsed URL of the HTTPSProxy if defined. + httpsProxy *url.URL + + // httpProxy is the parsed URL of the HTTPProxy if defined. + httpProxy *url.URL + + // ipMatchers represent all values in the NoProxy that are IP address + // prefixes or an IP address in CIDR notation. + ipMatchers []matcher + + // domainMatchers represent all values in the NoProxy that are a domain + // name or hostname & domain name + domainMatchers []matcher +} + +// FromEnvironment returns a Config instance populated from the +// environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the +// lowercase versions thereof). +// +// The environment values may be either a complete URL or a +// "host[:port]", in which case the "http" scheme is assumed. An error +// is returned if the value is a different form. +func FromEnvironment() *Config { + return &Config{ + HTTPProxy: getEnvAny("HTTP_PROXY", "http_proxy"), + HTTPSProxy: getEnvAny("HTTPS_PROXY", "https_proxy"), + NoProxy: getEnvAny("NO_PROXY", "no_proxy"), + CGI: os.Getenv("REQUEST_METHOD") != "", + } +} + +func getEnvAny(names ...string) string { + for _, n := range names { + if val := os.Getenv(n); val != "" { + return val + } + } + return "" +} + +// ProxyFunc returns a function that determines the proxy URL to use for +// a given request URL. Changing the contents of cfg will not affect +// proxy functions created earlier. +// +// A nil URL and nil error are returned if no proxy is defined in the +// environment, or a proxy should not be used for the given request, as +// defined by NO_PROXY. +// +// As a special case, if req.URL.Host is "localhost" or a loopback address +// (with or without a port number), then a nil URL and nil error will be returned. +func (cfg *Config) ProxyFunc() func(reqURL *url.URL) (*url.URL, error) { + // Preprocess the Config settings for more efficient evaluation. + cfg1 := &config{ + Config: *cfg, + } + cfg1.init() + return cfg1.proxyForURL +} + +func (cfg *config) proxyForURL(reqURL *url.URL) (*url.URL, error) { + var proxy *url.URL + if reqURL.Scheme == "https" { + proxy = cfg.httpsProxy + } else if reqURL.Scheme == "http" { + proxy = cfg.httpProxy + if proxy != nil && cfg.CGI { + return nil, errors.New("refusing to use HTTP_PROXY value in CGI environment; see golang.org/s/cgihttpproxy") + } + } + if proxy == nil { + return nil, nil + } + if !cfg.useProxy(canonicalAddr(reqURL)) { + return nil, nil + } + + return proxy, nil +} + +func parseProxy(proxy string) (*url.URL, error) { + if proxy == "" { + return nil, nil + } + + proxyURL, err := url.Parse(proxy) + if err != nil || proxyURL.Scheme == "" || proxyURL.Host == "" { + // proxy was bogus. Try prepending "http://" to it and + // see if that parses correctly. If not, we fall + // through and complain about the original one. + if proxyURL, err := url.Parse("http://" + proxy); err == nil { + return proxyURL, nil + } + } + if err != nil { + return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) + } + return proxyURL, nil +} + +// useProxy reports whether requests to addr should use a proxy, +// according to the NO_PROXY or no_proxy environment variable. 
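+// For example, with NO_PROXY=".example.com", "sub.example.com:443" is
+// excluded from proxying while "example.com:443" is not.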
+// addr is always a canonicalAddr with a host and port. +func (cfg *config) useProxy(addr string) bool { + if len(addr) == 0 { + return true + } + host, port, err := net.SplitHostPort(addr) + if err != nil { + return false + } + if host == "localhost" { + return false + } + ip := net.ParseIP(host) + if ip != nil { + if ip.IsLoopback() { + return false + } + } + + addr = strings.ToLower(strings.TrimSpace(host)) + + if ip != nil { + for _, m := range cfg.ipMatchers { + if m.match(addr, port, ip) { + return false + } + } + } + for _, m := range cfg.domainMatchers { + if m.match(addr, port, ip) { + return false + } + } + return true +} + +func (c *config) init() { + if parsed, err := parseProxy(c.HTTPProxy); err == nil { + c.httpProxy = parsed + } + if parsed, err := parseProxy(c.HTTPSProxy); err == nil { + c.httpsProxy = parsed + } + + for _, p := range strings.Split(c.NoProxy, ",") { + p = strings.ToLower(strings.TrimSpace(p)) + if len(p) == 0 { + continue + } + + if p == "*" { + c.ipMatchers = []matcher{allMatch{}} + c.domainMatchers = []matcher{allMatch{}} + return + } + + // IPv4/CIDR, IPv6/CIDR + if _, pnet, err := net.ParseCIDR(p); err == nil { + c.ipMatchers = append(c.ipMatchers, cidrMatch{cidr: pnet}) + continue + } + + // IPv4:port, [IPv6]:port + phost, pport, err := net.SplitHostPort(p) + if err == nil { + if len(phost) == 0 { + // There is no host part, likely the entry is malformed; ignore. + continue + } + if phost[0] == '[' && phost[len(phost)-1] == ']' { + phost = phost[1 : len(phost)-1] + } + } else { + phost = p + } + // IPv4, IPv6 + if pip := net.ParseIP(phost); pip != nil { + c.ipMatchers = append(c.ipMatchers, ipMatch{ip: pip, port: pport}) + continue + } + + if len(phost) == 0 { + // There is no host part, likely the entry is malformed; ignore. + continue + } + + // domain.com or domain.com:80 + // foo.com matches bar.foo.com + // .domain.com or .domain.com:port + // *.domain.com or *.domain.com:port + if strings.HasPrefix(phost, "*.") { + phost = phost[1:] + } + matchHost := false + if phost[0] != '.' { + matchHost = true + phost = "." + phost + } + if v, err := idnaASCII(phost); err == nil { + phost = v + } + c.domainMatchers = append(c.domainMatchers, domainMatch{host: phost, port: pport, matchHost: matchHost}) + } +} + +var portMap = map[string]string{ + "http": "80", + "https": "443", + "socks5": "1080", +} + +// canonicalAddr returns url.Host but always with a ":port" suffix +func canonicalAddr(url *url.URL) string { + addr := url.Hostname() + if v, err := idnaASCII(addr); err == nil { + addr = v + } + port := url.Port() + if port == "" { + port = portMap[url.Scheme] + } + return net.JoinHostPort(addr, port) +} + +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +func idnaASCII(v string) (string, error) { + // TODO: Consider removing this check after verifying performance is okay. + // Right now punycode verification, length checks, context checks, and the + // permissible character tests are all omitted. It also prevents the ToASCII + // call from salvaging an invalid IDN, when possible. As a result it may be + // possible to have two IDNs that appear identical to the user where the + // ASCII-only version causes an error downstream whereas the non-ASCII + // version does not. 
+ // Note that for correct ASCII IDNs ToASCII will only do considerably more + // work, but it will not cause an allocation. + if isASCII(v) { + return v, nil + } + return idna.Lookup.ToASCII(v) +} + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +// matcher represents the matching rule for a given value in the NO_PROXY list +type matcher interface { + // match returns true if the host and optional port or ip and optional port + // are allowed + match(host, port string, ip net.IP) bool +} + +// allMatch matches on all possible inputs +type allMatch struct{} + +func (a allMatch) match(host, port string, ip net.IP) bool { + return true +} + +type cidrMatch struct { + cidr *net.IPNet +} + +func (m cidrMatch) match(host, port string, ip net.IP) bool { + return m.cidr.Contains(ip) +} + +type ipMatch struct { + ip net.IP + port string +} + +func (m ipMatch) match(host, port string, ip net.IP) bool { + if m.ip.Equal(ip) { + return m.port == "" || m.port == port + } + return false +} + +type domainMatch struct { + host string + port string + + matchHost bool +} + +func (m domainMatch) match(host, port string, ip net.IP) bool { + if strings.HasSuffix(host, m.host) || (m.matchHost && host == m.host[1:]) { + return m.port == "" || m.port == port + } + return false +} diff --git a/vendor/modules.txt b/vendor/modules.txt index d4dc8abcd..5f282d992 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -37,11 +37,92 @@ cloud.google.com/go/storage cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 cloud.google.com/go/storage/internal/apiv2/storagepb +# github.com/Azure/azure-pipeline-go v0.2.3 +## explicit; go 1.14 +github.com/Azure/azure-pipeline-go/pipeline +# github.com/Azure/azure-storage-blob-go v0.15.0 +## explicit; go 1.15 +github.com/Azure/azure-storage-blob-go/azblob +# github.com/Azure/go-autorest v14.2.0+incompatible +## explicit +github.com/Azure/go-autorest +# github.com/Azure/go-autorest/autorest v0.11.29 +## explicit; go 1.15 +github.com/Azure/go-autorest/autorest +github.com/Azure/go-autorest/autorest/azure +# github.com/Azure/go-autorest/autorest/adal v0.9.24 +## explicit; go 1.15 +github.com/Azure/go-autorest/autorest/adal +# github.com/Azure/go-autorest/autorest/date v0.3.0 +## explicit; go 1.12 +github.com/Azure/go-autorest/autorest/date +# github.com/Azure/go-autorest/logger v0.2.1 +## explicit; go 1.12 +github.com/Azure/go-autorest/logger +# github.com/Azure/go-autorest/tracing v0.6.0 +## explicit; go 1.12 +github.com/Azure/go-autorest/tracing # github.com/GoogleCloudPlatform/cloud-builders/gcs-fetcher v0.0.0-20240628152042-7b3987f0b238 ## explicit; go 1.21 github.com/GoogleCloudPlatform/cloud-builders/gcs-fetcher/pkg/common github.com/GoogleCloudPlatform/cloud-builders/gcs-fetcher/pkg/fetcher github.com/GoogleCloudPlatform/cloud-builders/gcs-fetcher/pkg/uploader +# github.com/aws/aws-sdk-go v1.55.5 +## explicit; go 1.19 +github.com/aws/aws-sdk-go/aws +github.com/aws/aws-sdk-go/aws/arn +github.com/aws/aws-sdk-go/aws/auth/bearer +github.com/aws/aws-sdk-go/aws/awserr +github.com/aws/aws-sdk-go/aws/awsutil +github.com/aws/aws-sdk-go/aws/client +github.com/aws/aws-sdk-go/aws/client/metadata +github.com/aws/aws-sdk-go/aws/corehandlers +github.com/aws/aws-sdk-go/aws/credentials +github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds +github.com/aws/aws-sdk-go/aws/credentials/endpointcreds +github.com/aws/aws-sdk-go/aws/credentials/processcreds 
+github.com/aws/aws-sdk-go/aws/credentials/ssocreds +github.com/aws/aws-sdk-go/aws/credentials/stscreds +github.com/aws/aws-sdk-go/aws/csm +github.com/aws/aws-sdk-go/aws/defaults +github.com/aws/aws-sdk-go/aws/ec2metadata +github.com/aws/aws-sdk-go/aws/endpoints +github.com/aws/aws-sdk-go/aws/request +github.com/aws/aws-sdk-go/aws/session +github.com/aws/aws-sdk-go/aws/signer/v4 +github.com/aws/aws-sdk-go/internal/context +github.com/aws/aws-sdk-go/internal/ini +github.com/aws/aws-sdk-go/internal/s3shared +github.com/aws/aws-sdk-go/internal/s3shared/arn +github.com/aws/aws-sdk-go/internal/s3shared/s3err +github.com/aws/aws-sdk-go/internal/sdkio +github.com/aws/aws-sdk-go/internal/sdkmath +github.com/aws/aws-sdk-go/internal/sdkrand +github.com/aws/aws-sdk-go/internal/sdkuri +github.com/aws/aws-sdk-go/internal/shareddefaults +github.com/aws/aws-sdk-go/internal/strings +github.com/aws/aws-sdk-go/internal/sync/singleflight +github.com/aws/aws-sdk-go/private/checksum +github.com/aws/aws-sdk-go/private/protocol +github.com/aws/aws-sdk-go/private/protocol/eventstream +github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi +github.com/aws/aws-sdk-go/private/protocol/json/jsonutil +github.com/aws/aws-sdk-go/private/protocol/jsonrpc +github.com/aws/aws-sdk-go/private/protocol/query +github.com/aws/aws-sdk-go/private/protocol/query/queryutil +github.com/aws/aws-sdk-go/private/protocol/rest +github.com/aws/aws-sdk-go/private/protocol/restjson +github.com/aws/aws-sdk-go/private/protocol/restxml +github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil +github.com/aws/aws-sdk-go/service/s3 +github.com/aws/aws-sdk-go/service/s3/s3iface +github.com/aws/aws-sdk-go/service/s3/s3manager +github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface +github.com/aws/aws-sdk-go/service/sso +github.com/aws/aws-sdk-go/service/sso/ssoiface +github.com/aws/aws-sdk-go/service/ssooidc +github.com/aws/aws-sdk-go/service/sts +github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/aws/aws-sdk-go-v2 v1.31.0 ## explicit; go 1.21 github.com/aws/aws-sdk-go-v2/aws @@ -169,6 +250,24 @@ github.com/aws/smithy-go/tracing github.com/aws/smithy-go/transport/http github.com/aws/smithy-go/transport/http/internal/io github.com/aws/smithy-go/waiter +# github.com/c2fo/vfs/v6 v6.19.0 +## explicit; go 1.22.0 +github.com/c2fo/vfs/v6 +github.com/c2fo/vfs/v6/backend +github.com/c2fo/vfs/v6/backend/all +github.com/c2fo/vfs/v6/backend/azure +github.com/c2fo/vfs/v6/backend/ftp +github.com/c2fo/vfs/v6/backend/ftp/types +github.com/c2fo/vfs/v6/backend/gs +github.com/c2fo/vfs/v6/backend/mem +github.com/c2fo/vfs/v6/backend/os +github.com/c2fo/vfs/v6/backend/s3 +github.com/c2fo/vfs/v6/backend/sftp +github.com/c2fo/vfs/v6/mocks +github.com/c2fo/vfs/v6/options +github.com/c2fo/vfs/v6/options/delete +github.com/c2fo/vfs/v6/utils +github.com/c2fo/vfs/v6/vfssimple # github.com/codeclysm/extract/v3 v3.1.1 ## explicit; go 1.17 github.com/codeclysm/extract/v3 @@ -202,6 +301,9 @@ github.com/go-logr/logr/funcr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr +# github.com/golang-jwt/jwt/v4 v4.5.0 +## explicit; go 1.16 +github.com/golang-jwt/jwt/v4 # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru @@ -288,9 +390,21 @@ github.com/h2non/filetype github.com/h2non/filetype/matchers github.com/h2non/filetype/matchers/isobmff github.com/h2non/filetype/types +# github.com/hashicorp/errwrap v1.1.0 +## explicit +github.com/hashicorp/errwrap +# 
github.com/hashicorp/go-multierror v1.1.1 +## explicit; go 1.13 +github.com/hashicorp/go-multierror # github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap +# github.com/jlaffaye/ftp v0.2.1-0.20240214224549-4edb16bfcd0f +## explicit; go 1.17 +github.com/jlaffaye/ftp +# github.com/jmespath/go-jmespath v0.4.0 +## explicit; go 1.14 +github.com/jmespath/go-jmespath # github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5 ## explicit github.com/juju/errors @@ -305,8 +419,14 @@ github.com/klauspost/compress/internal/cpuinfo github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash -# github.com/kr/pretty v0.3.1 -## explicit; go 1.12 +# github.com/kr/fs v0.1.0 +## explicit +github.com/kr/fs +# github.com/kr/text v0.2.0 +## explicit +# github.com/mattn/go-ieproxy v0.0.12 +## explicit; go 1.17 +github.com/mattn/go-ieproxy # github.com/mitchellh/go-homedir v1.1.0 ## explicit github.com/mitchellh/go-homedir @@ -323,6 +443,10 @@ github.com/opencontainers/image-spec/specs-go/v1 # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors +# github.com/pkg/sftp v1.13.6 +## explicit; go 1.15 +github.com/pkg/sftp +github.com/pkg/sftp/internal/encoding/ssh/filexfer # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib @@ -335,9 +459,13 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag +# github.com/stretchr/objx v0.5.2 +## explicit; go 1.20 +github.com/stretchr/objx # github.com/stretchr/testify v1.9.0 ## explicit; go 1.17 github.com/stretchr/testify/assert +github.com/stretchr/testify/mock # github.com/ulikunitz/xz v0.5.11 ## explicit; go 1.12 github.com/ulikunitz/xz @@ -401,16 +529,25 @@ go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded # golang.org/x/crypto v0.27.0 ## explicit; go 1.20 +golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 golang.org/x/crypto/chacha20poly1305 golang.org/x/crypto/cryptobyte golang.org/x/crypto/cryptobyte/asn1 +golang.org/x/crypto/curve25519 golang.org/x/crypto/hkdf golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 +golang.org/x/crypto/pkcs12 +golang.org/x/crypto/pkcs12/internal/rc2 +golang.org/x/crypto/ssh +golang.org/x/crypto/ssh/internal/bcrypt_pbkdf +golang.org/x/crypto/ssh/knownhosts # golang.org/x/net v0.29.0 ## explicit; go 1.18 +golang.org/x/net/context golang.org/x/net/http/httpguts +golang.org/x/net/http/httpproxy golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna